use crate::cli::commands::{KubernetesCmd, KubernetesSubCommand};
use crate::logging::{log_error, log_info, log_success};
use crate::utils::command_exists;
use colored::Colorize;
use serde::Deserialize;
use serde_json::Value;
use std::collections::HashMap;
use std::path::PathBuf;
use tokio::fs;
use tokio::process::Command;
const COMMAND_NAME: &str = "kubernetes";
/// One entry of a node's `status.conditions` array as returned by
/// `kubectl get nodes -o json`.
#[derive(Debug, Deserialize)]
struct NodeCondition {
    // JSON field is `type`; renamed because `type` is a Rust keyword.
    #[serde(rename = "type")]
    kind: Option<String>,
    // Condition status string as reported by the API (e.g. "True").
    status: Option<String>,
}
/// Subset of a node's `status` object from `kubectl get nodes -o json`.
#[derive(Debug, Deserialize)]
struct NodeStatus {
    // Readiness conditions; scanned for type=Ready, status=True.
    conditions: Option<Vec<NodeCondition>>,
    // Resource totals; deserialized but not read by the code in this module.
    capacity: Option<HashMap<String, String>>,
    allocatable: Option<HashMap<String, String>>,
}
/// Minimal node `metadata` — only the name is needed here.
#[derive(Debug, Deserialize)]
struct NodeMeta {
    name: Option<String>,
}
/// A single item in the node list (`kind: Node`).
#[derive(Debug, Deserialize)]
struct NodeItem {
    metadata: Option<NodeMeta>,
    status: Option<NodeStatus>,
}
/// Top-level `kind: NodeList` response from `kubectl get nodes -o json`.
#[derive(Debug, Deserialize)]
struct NodeList {
    items: Vec<NodeItem>,
}
pub async fn run_kubernetes(cmd: KubernetesCmd, debug: bool) -> Result<(), String> {
match cmd.command {
KubernetesSubCommand::Check {
context,
namespace,
offline,
} => check_cluster(context, namespace, offline, debug).await,
KubernetesSubCommand::Generate {
name,
image,
port,
replicas,
namespace,
output,
host,
} => generate_bundle(name, image, port, replicas, namespace, output, host, debug).await,
KubernetesSubCommand::Apply {
file,
context,
namespace,
dry_run,
} => apply_bundle(file, context, namespace, dry_run, debug).await,
KubernetesSubCommand::Status { namespace, context } => {
status(namespace, context, debug).await
}
}
}
/// Verify that the `kubectl` binary is discoverable on PATH, erroring
/// out early with a descriptive message when it is not.
async fn ensure_kubectl() -> Result<(), String> {
    if !command_exists("kubectl") {
        return Err("kubectl is required but was not found in PATH".to_string());
    }
    Ok(())
}
/// Assemble the kubectl argument vector: the `base` arguments followed by
/// an optional `--context <ctx>` pair and an optional `-n <ns>` pair.
fn build_args(base: &[&str], context: Option<&String>, namespace: Option<&String>) -> Vec<String> {
    let mut args: Vec<String> = base.iter().map(|s| (*s).to_string()).collect();
    if let Some(ctx) = context {
        args.extend(["--context".to_string(), ctx.clone()]);
    }
    if let Some(ns) = namespace {
        args.extend(["-n".to_string(), ns.clone()]);
    }
    args
}
/// Execute `kubectl` with `base` plus optional context/namespace flags.
/// Returns captured stdout on success; on failure, an error string that
/// includes the argument list and kubectl's stderr.
async fn run_kubectl(
    base: &[&str],
    context: Option<&String>,
    namespace: Option<&String>,
) -> Result<String, String> {
    ensure_kubectl().await?;
    let args = build_args(base, context, namespace);
    let output = Command::new("kubectl")
        .args(&args)
        .output()
        .await
        .map_err(|e| format!("Failed to execute kubectl with args {:?}: {}", args, e))?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        return Err(format!("kubectl {:?} failed: {}", args, stderr));
    }
    Ok(String::from_utf8_lossy(&output.stdout).to_string())
}
/// Run kubectl (see [`run_kubectl`]) and parse its stdout as JSON.
async fn run_kubectl_json(
    base: &[&str],
    context: Option<&String>,
    namespace: Option<&String>,
) -> Result<Value, String> {
    let stdout = run_kubectl(base, context, namespace).await?;
    serde_json::from_str::<Value>(&stdout)
        .map_err(|e| format!("Failed to parse kubectl JSON: {}", e))
}
/// Run the cluster health check: verify kubectl, then (unless `offline`)
/// report client/server versions, the current context, node readiness,
/// and a pod listing for `namespace`.
async fn check_cluster(
    context: Option<String>,
    namespace: String,
    offline: bool,
    debug: bool,
) -> Result<(), String> {
    // BUGFIX: the original built the log_error future inside `map_err`
    // and dropped it unawaited (futures are lazy), so "kubectl missing"
    // was never actually logged. Await it before propagating the error.
    // (Assumes log_error is async like log_info/log_success — it is
    // awaited nowhere else in this file; confirm against the logging module.)
    if let Err(e) = ensure_kubectl().await {
        let _ = log_error(COMMAND_NAME, "kubectl missing", Some(&e)).await;
        return Err(e);
    }
    let _ = log_info(COMMAND_NAME, "Starting cluster check", None).await;
    println!(
        "{}",
        "Kubernetes feature flag enabled (experimental)".bright_blue()
    );
    if offline {
        println!(
            "{}",
            "Offline mode: verified kubectl binary presence. Skipping live cluster calls.".yellow()
        );
        return Ok(());
    }
    // Borrow the context once; no need to clone the Option<String>.
    let ctx = context.as_ref();
    let version = run_kubectl_json(&["version", "--output=json"], ctx, None).await?;
    let client = version["clientVersion"]["gitVersion"]
        .as_str()
        .unwrap_or("unknown");
    let server = version["serverVersion"]["gitVersion"]
        .as_str()
        .unwrap_or("unknown");
    let current_context = run_kubectl(&["config", "current-context"], ctx, None).await?;
    // Node readiness is best-effort: a schema mismatch degrades to 0/0
    // instead of failing the whole check.
    let nodes: NodeList =
        serde_json::from_value(run_kubectl_json(&["get", "nodes", "-o", "json"], ctx, None).await?)
            .unwrap_or_else(|_| NodeList { items: Vec::new() });
    // A node is Ready when any condition has type=Ready and status=True.
    let ready_nodes = nodes
        .items
        .iter()
        .filter(|n| {
            n.status
                .as_ref()
                .and_then(|s| s.conditions.as_ref())
                .map_or(false, |conds| {
                    conds.iter().any(|c| {
                        c.kind.as_deref() == Some("Ready") && c.status.as_deref() == Some("True")
                    })
                })
        })
        .count();
    let node_total = nodes.items.len();
    println!("{} {}", "Context:".bright_blue(), current_context.trim());
    println!(
        "{} client={} server={}",
        "Versions:".bright_blue(),
        client,
        server
    );
    println!(
        "{} {}/{} Ready",
        "Nodes:".bright_blue(),
        ready_nodes,
        node_total
    );
    let ns_arg = Some(namespace.clone());
    // Pod listing is also best-effort; an empty/forbidden namespace
    // prints a friendly placeholder instead of aborting.
    let pods = run_kubectl(
        &[
            "get",
            "pods",
            "--no-headers",
            "-o",
            "custom-columns=NAME:.metadata.name,STATUS:.status.phase",
        ],
        ctx,
        ns_arg.as_ref(),
    )
    .await
    .unwrap_or_else(|_| "No pods listed (namespace may be empty)".to_string());
    println!("{} {}", "Namespace:".bright_blue(), namespace);
    println!("{}", pods);
    let _ = log_success(COMMAND_NAME, "Cluster check complete", None).await;
    if debug {
        let _ = log_info(COMMAND_NAME, "Cluster check ran in debug mode", None).await;
    }
    Ok(())
}
/// Render the manifest bundle for `name`/`image` and write it to `output`,
/// creating any missing parent directories first.
async fn generate_bundle(
    name: String,
    image: String,
    port: u16,
    replicas: u16,
    namespace: String,
    output: String,
    host: Option<String>,
    _debug: bool,
) -> Result<(), String> {
    let _ = log_info(COMMAND_NAME, "Generating manifest bundle", Some(&name)).await;
    let path = PathBuf::from(&output);
    let manifest = render_manifest(&name, &image, port, replicas, &namespace, host.as_deref());
    // Ensure the destination directory exists before writing the file.
    if let Some(dir) = path.parent() {
        fs::create_dir_all(dir)
            .await
            .map_err(|e| format!("Failed to create directory {}: {}", dir.display(), e))?;
    }
    fs::write(&path, manifest)
        .await
        .map_err(|e| format!("Failed to write manifest {}: {}", path.display(), e))?;
    println!("{} {}", "Wrote manifest bundle to".green(), path.display());
    let ingress_note = if host.is_some() { ", Ingress" } else { "" };
    println!(
        "- Deployment, Service, ServiceAccount, Role/RoleBinding, NetworkPolicy{}",
        ingress_note
    );
    println!(
        "- Safe defaults: runAsNonRoot, seccomp=RuntimeDefault, probes, requests/limits, namespace-scoped RBAC"
    );
    let _ = log_success(COMMAND_NAME, "Manifest bundle generated", Some(&output)).await;
    Ok(())
}
/// Apply a manifest file with `kubectl apply -f`, optionally as a
/// server-side dry run, and echo kubectl's output.
async fn apply_bundle(
    file: String,
    context: Option<String>,
    namespace: Option<String>,
    dry_run: bool,
    _debug: bool,
) -> Result<(), String> {
    let path = PathBuf::from(&file);
    if !path.exists() {
        return Err(format!("Manifest file not found: {}", file));
    }
    let file_str = path
        .to_str()
        .ok_or_else(|| "Invalid manifest path".to_string())?;
    let mut args = vec!["apply", "-f", file_str];
    if dry_run {
        // Server-side dry run: validated by the API server, nothing persisted.
        args.push("--dry-run=server");
    }
    let output = run_kubectl(&args, context.as_ref(), namespace.as_ref()).await?;
    println!("{}", output);
    let _ = log_success(COMMAND_NAME, "kubectl apply completed", Some(&file)).await;
    Ok(())
}
/// Print a wide listing of deployments, services, and pods for `namespace`.
async fn status(namespace: String, context: Option<String>, _debug: bool) -> Result<(), String> {
    let ns_filter = Some(namespace.clone());
    let listing = run_kubectl(
        &["get", "deploy,svc,pods", "-o", "wide"],
        context.as_ref(),
        ns_filter.as_ref(),
    )
    .await?;
    println!("{} {}", "Namespace:".bright_blue(), namespace);
    println!("{}", listing);
    let _ = log_info(COMMAND_NAME, "Rendered namespace status", None).await;
    Ok(())
}
/// Render the multi-document YAML bundle: optional Namespace,
/// ServiceAccount, Role/RoleBinding, Deployment, Service, NetworkPolicy,
/// and (when `host` is given) an Ingress.
///
/// BUGFIX: the manifests are raw strings (`r#"…"#`), so the original
/// `\"100m\"`-style escapes were emitted literally as backslash-quote,
/// producing invalid YAML; quotes are now written directly. The shared
/// label block is also rendered at the indentation depth each insertion
/// point requires (a single fixed-indent string cannot be valid both
/// under `metadata.labels` and under the deeper pod-template labels).
fn render_manifest(
    name: &str,
    image: &str,
    port: u16,
    replicas: u16,
    namespace: &str,
    host: Option<&str>,
) -> String {
    // Common labels applied to every generated object.
    let label_lines = [
        format!("app.kubernetes.io/name: {name}"),
        "app.kubernetes.io/managed-by: xbp".to_string(),
        "xbp.dev/feature: kubernetes".to_string(),
        "xbp.dev/profile: experimental".to_string(),
    ];
    // Render the label block indented by `indent` spaces.
    let labels_at = |indent: usize| -> String {
        let pad = " ".repeat(indent);
        label_lines
            .iter()
            .map(|line| format!("{pad}{line}"))
            .collect::<Vec<_>>()
            .join("\n")
    };
    let labels4 = labels_at(4); // metadata.labels of a top-level object
    let labels8 = labels_at(8); // spec.template.metadata.labels

    let mut docs: Vec<String> = Vec::new();
    // Only emit a Namespace object for non-default namespaces.
    if namespace != "default" {
        docs.push(format!(
            r#"apiVersion: v1
kind: Namespace
metadata:
  name: {namespace}
  labels:
{labels4}
---
"#
        ));
    }
    docs.push(format!(
        r#"apiVersion: v1
kind: ServiceAccount
metadata:
  name: {name}-sa
  namespace: {namespace}
  labels:
{labels4}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {name}-reader
  namespace: {namespace}
  labels:
{labels4}
rules:
  - apiGroups: [""]
    resources: ["pods", "services", "endpoints", "configmaps"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {name}-reader
  namespace: {namespace}
  labels:
{labels4}
subjects:
  - kind: ServiceAccount
    name: {name}-sa
    namespace: {namespace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {name}-reader
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {name}
  namespace: {namespace}
  labels:
{labels4}
spec:
  replicas: {replicas}
  selector:
    matchLabels:
      app.kubernetes.io/name: {name}
  template:
    metadata:
      labels:
{labels8}
      annotations:
        xbp.dev/generated-by: kubernetes-feature
    spec:
      serviceAccountName: {name}-sa
      securityContext:
        runAsNonRoot: true
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: {name}
          image: {image}
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: {port}
              name: http
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
          resources:
            requests:
              cpu: "100m"
              memory: "128Mi"
            limits:
              cpu: "500m"
              memory: "512Mi"
          livenessProbe:
            httpGet:
              path: /healthz
              port: http
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 2
          readinessProbe:
            httpGet:
              path: /readyz
              port: http
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 2
---
apiVersion: v1
kind: Service
metadata:
  name: {name}
  namespace: {namespace}
  labels:
{labels4}
spec:
  selector:
    app.kubernetes.io/name: {name}
  ports:
    - name: http
      port: {port}
      targetPort: {port}
      protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {name}-default-deny
  namespace: {namespace}
  labels:
{labels4}
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: {name}
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - podSelector: {{}}
      ports:
        - protocol: TCP
          port: {port}
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system
          podSelector:
            matchLabels:
              k8s-app: kube-dns
      ports:
        - protocol: UDP
          port: 53
    - to:
        - podSelector: {{}}
      ports:
        - protocol: TCP
          port: {port}
"#
    ));
    if let Some(host) = host {
        docs.push(format!(
            r#"---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {name}
  namespace: {namespace}
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: 10m
  labels:
{labels4}
spec:
  rules:
    - host: {host}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: {name}
                port:
                  number: {port}
"#
        ));
    }
    docs.join("\n")
}