use crate::cli::commands::{KubernetesAddonSubCommand, KubernetesCmd, KubernetesSubCommand};
use crate::logging::{log_error, log_info, log_success};
use crate::utils::command_exists;
use colored::Colorize;
use serde::Deserialize;
use serde_json::Value;
use std::path::PathBuf;
use std::process::Stdio;
use tokio::fs;
use tokio::io::AsyncWriteExt;
use tokio::process::Command;
const COMMAND_NAME: &str = "kubernetes";
/// Which CLI entry point is used to talk to the cluster.
#[derive(Clone, Copy, Debug)]
enum KubectlBackend {
    /// Plain `kubectl` found on PATH.
    Kubectl,
    /// MicroK8s-bundled kubectl, invoked as `microk8s kubectl`.
    Microk8s,
}

impl KubectlBackend {
    /// Human-readable name of the underlying command, used in logs and errors.
    fn command_label(self) -> &'static str {
        match self {
            Self::Kubectl => "kubectl",
            Self::Microk8s => "microk8s kubectl",
        }
    }
}
/// One entry of a node's `status.conditions` array as returned by
/// `kubectl get nodes -o json`. Fields are optional because the API may omit
/// them; callers only look for `type == "Ready"` with `status == "True"`.
#[derive(Debug, Deserialize)]
struct NodeCondition {
    // `type` is a Rust keyword, so the JSON field is mapped to `kind`.
    #[serde(rename = "type")]
    kind: Option<String>,
    status: Option<String>,
}

/// `status` object of a node; only the conditions list is of interest here.
#[derive(Debug, Deserialize)]
struct NodeStatus {
    conditions: Option<Vec<NodeCondition>>,
}

/// A single item of the node list.
#[derive(Debug, Deserialize)]
struct NodeItem {
    status: Option<NodeStatus>,
}

/// Minimal mirror of the Kubernetes `NodeList` JSON, used to count Ready nodes.
#[derive(Debug, Deserialize)]
struct NodeList {
    items: Vec<NodeItem>,
}
/// Entry point for the `kubernetes` CLI feature: routes each subcommand to
/// its async handler, threading through the global `debug` flag.
pub async fn run_kubernetes(cmd: KubernetesCmd, debug: bool) -> Result<(), String> {
    match cmd.command {
        // Connectivity / environment sanity check.
        KubernetesSubCommand::Check {
            context,
            namespace,
            offline,
        } => check_cluster(context, namespace, offline, debug).await,
        // Write a hardened manifest bundle to disk.
        KubernetesSubCommand::Generate {
            name,
            image,
            port,
            replicas,
            namespace,
            output,
            host,
        } => generate_bundle(name, image, port, replicas, namespace, output, host, debug).await,
        // `kubectl apply` (optionally a server-side dry run).
        KubernetesSubCommand::Apply {
            file,
            context,
            namespace,
            dry_run,
        } => apply_bundle(file, context, namespace, dry_run, debug).await,
        // Show deployments/services/pods in a namespace.
        KubernetesSubCommand::Status { namespace, context } => {
            status(namespace, context, debug).await
        }
        // MicroK8s addon management (list/enable/disable).
        KubernetesSubCommand::Addons(addons) => handle_addons(addons.command, debug).await,
        // Extract the dashboard bearer token from a secret.
        KubernetesSubCommand::DashboardToken {
            namespace,
            secret,
            context,
        } => dashboard_token(namespace, secret, context, debug).await,
        // Extract Grafana admin credentials from a secret.
        KubernetesSubCommand::ObservabilityCreds {
            namespace,
            secret,
            context,
        } => observability_creds(namespace, secret, context, debug).await,
        // Create/update a cert-manager ACME Issuer.
        KubernetesSubCommand::Issuer {
            email,
            name,
            namespace,
            server,
            private_key_secret,
            ingress_class_name,
            context,
            dry_run,
        } => {
            create_issuer(
                email,
                name,
                namespace,
                server,
                private_key_secret,
                ingress_class_name,
                context,
                dry_run,
                debug,
            )
            .await
        }
    }
}
/// Pick the available Kubernetes CLI, preferring `kubectl` over `microk8s`.
fn ensure_kube_cli() -> Result<KubectlBackend, String> {
    match (command_exists("kubectl"), command_exists("microk8s")) {
        (true, _) => Ok(KubectlBackend::Kubectl),
        (false, true) => Ok(KubectlBackend::Microk8s),
        (false, false) => Err("Neither kubectl nor microk8s was found in PATH".to_string()),
    }
}
/// Fail fast when `microk8s` is not installed; addon management requires it.
fn ensure_microk8s_cli() -> Result<(), String> {
    if !command_exists("microk8s") {
        return Err(
            "microk8s is required for addon management but was not found in PATH".to_string(),
        );
    }
    Ok(())
}
/// Assemble a kubectl argument vector from a base command plus optional
/// `--context` and `-n <namespace>` flags.
fn build_args(base: &[&str], context: Option<&String>, namespace: Option<&String>) -> Vec<String> {
    let mut out: Vec<String> = Vec::with_capacity(base.len() + 4);
    out.extend(base.iter().map(|s| s.to_string()));
    if let Some(ctx) = context {
        out.extend(["--context".to_string(), ctx.clone()]);
    }
    if let Some(ns) = namespace {
        out.extend(["-n".to_string(), ns.clone()]);
    }
    out
}
/// Same as `build_args`, but for an already-owned base argument list.
fn build_args_owned(
    base: &[String],
    context: Option<&String>,
    namespace: Option<&String>,
) -> Vec<String> {
    let mut out: Vec<String> = Vec::with_capacity(base.len() + 4);
    out.extend_from_slice(base);
    if let Some(ctx) = context {
        out.extend(["--context".to_string(), ctx.clone()]);
    }
    if let Some(ns) = namespace {
        out.extend(["-n".to_string(), ns.clone()]);
    }
    out
}
/// Convenience wrapper over `run_kubectl_owned` for `&str` argument slices.
async fn run_kubectl(
    base: &[&str],
    context: Option<&String>,
    namespace: Option<&String>,
) -> Result<String, String> {
    let owned: Vec<String> = base.iter().map(ToString::to_string).collect();
    run_kubectl_owned(&owned, context, namespace).await
}
/// Run kubectl (via the detected backend) and return its stdout as a String,
/// or a formatted error containing the command and stderr on failure.
async fn run_kubectl_owned(
    base: &[String],
    context: Option<&String>,
    namespace: Option<&String>,
) -> Result<String, String> {
    let backend = ensure_kube_cli()?;
    let args = build_args_owned(base, context, namespace);
    let output = run_kube_command(backend, &args, None).await?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        return Err(format_kube_error(backend, &args, &stderr));
    }
    Ok(String::from_utf8_lossy(&output.stdout).to_string())
}
/// Run kubectl with `stdin_payload` piped to the process (e.g. `apply -f -`).
async fn run_kubectl_with_stdin(
    base: &[&str],
    context: Option<&String>,
    namespace: Option<&String>,
    stdin_payload: &str,
) -> Result<String, String> {
    let backend = ensure_kube_cli()?;
    let args = build_args(base, context, namespace);
    let output = run_kube_command(backend, &args, Some(stdin_payload)).await?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr).to_string();
        return Err(format_kube_error(backend, &args, &stderr));
    }
    Ok(String::from_utf8_lossy(&output.stdout).to_string())
}
async fn run_kube_command(
backend: KubectlBackend,
args: &[String],
stdin_payload: Option<&str>,
) -> Result<std::process::Output, String> {
match backend {
KubectlBackend::Kubectl => {
run_command_capture_with_stdin("kubectl", args, stdin_payload).await
}
KubectlBackend::Microk8s => {
let mut microk8s_args = vec!["kubectl".to_string()];
microk8s_args.extend(args.iter().cloned());
run_microk8s_command(µk8s_args, stdin_payload).await
}
}
}
/// Render a failed kubectl invocation as an error string, appending a MicroK8s
/// group-permission hint when stderr indicates that specific failure mode.
fn format_kube_error(backend: KubectlBackend, args: &[String], stderr: &str) -> String {
    let permission_issue = matches!(backend, KubectlBackend::Microk8s)
        && stderr.contains("Insufficient permissions to access MicroK8s");
    if permission_issue {
        format!(
            "{} {:?} failed: {}\nHint: run `sudo usermod -a -G microk8s $USER`, `sudo chown -R $USER ~/.kube`, then reload groups with `newgrp microk8s` or reboot.",
            backend.command_label(),
            args,
            stderr.trim()
        )
    } else {
        format!("{} {:?} failed: {}", backend.command_label(), args, stderr)
    }
}
async fn run_microk8s(args: &[&str]) -> Result<String, String> {
let microk8s_args: Vec<String> = args.iter().map(|a| (*a).to_string()).collect();
let output = run_microk8s_command(µk8s_args, None).await?;
if output.status.success() {
let stdout = String::from_utf8_lossy(&output.stdout).to_string();
let stderr = String::from_utf8_lossy(&output.stderr).to_string();
let combined = if stdout.trim().is_empty() && !stderr.trim().is_empty() {
stderr
} else {
stdout
};
Ok(combined)
} else {
let stderr = String::from_utf8_lossy(&output.stderr).to_string();
Err(format_microk8s_error(args, &stderr))
}
}
/// Run `microk8s <args...>`, transparently retrying with non-interactive sudo
/// (`sudo -n`) when the failure looks like a MicroK8s group-permission error.
///
/// Returns the captured process output. Note that non-permission failures are
/// returned as `Ok(output)` with a non-success status so callers can format
/// their own error from status/stderr.
async fn run_microk8s_command(
    args: &[String],
    stdin_payload: Option<&str>,
) -> Result<std::process::Output, String> {
    ensure_microk8s_cli()?;
    let output = run_command_capture_with_stdin("microk8s", args, stdin_payload).await?;
    if output.status.success() {
        return Ok(output);
    }
    let stderr = String::from_utf8_lossy(&output.stderr).to_string();
    // Only permission errors trigger the sudo fallback; any other failure is
    // handed back to the caller untouched.
    if !is_microk8s_permission_error(&stderr) {
        return Ok(output);
    }
    if !command_exists("sudo") {
        return Err(format!(
            "microk8s {:?} failed: {}\nHint: retry with sudo, or run `sudo usermod -a -G microk8s $USER`, `sudo chown -R $USER ~/.kube`, then `newgrp microk8s`.",
            args,
            stderr.trim()
        ));
    }
    // `-n` keeps sudo non-interactive so a password prompt cannot hang the CLI.
    let mut sudo_args = vec!["-n".to_string(), "microk8s".to_string()];
    sudo_args.extend(args.iter().cloned());
    let sudo_output = run_command_capture_with_stdin("sudo", &sudo_args, stdin_payload).await?;
    if sudo_output.status.success() {
        return Ok(sudo_output);
    }
    let sudo_stderr = String::from_utf8_lossy(&sudo_output.stderr).to_string();
    // Distinguish "sudo needs a password" from a genuine command failure.
    if is_sudo_password_required(&sudo_stderr) {
        return Err(format!(
            "microk8s {:?} failed due to permissions.\nAutomatic non-interactive sudo retry (`sudo -n microk8s ...`) was blocked because a password is required.\nRun the command with sudo manually, or add the user to group `microk8s` and reload groups (`newgrp microk8s`).",
            args
        ));
    }
    Err(format!(
        "microk8s {:?} failed: {}\nAutomatic sudo retry also failed: {}",
        args,
        stderr.trim(),
        sudo_stderr.trim()
    ))
}
/// Spawn `program args...`, optionally streaming `stdin_payload` into the
/// child, and capture stdout/stderr.
///
/// stdin is only piped when a payload is provided; the handle is taken out of
/// the child and dropped right after the write, closing the pipe so the child
/// observes EOF before `wait_with_output`.
async fn run_command_capture_with_stdin(
    program: &str,
    args: &[String],
    stdin_payload: Option<&str>,
) -> Result<std::process::Output, String> {
    let mut cmd = Command::new(program);
    cmd.args(args);
    if stdin_payload.is_some() {
        cmd.stdin(Stdio::piped());
    }
    let mut child = cmd
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .map_err(|e| format!("Failed to execute {} with args {:?}: {}", program, args, e))?;
    if let Some(payload) = stdin_payload {
        // `take()` moves the handle into this scope so it drops (EOF) once the
        // write completes.
        if let Some(mut stdin) = child.stdin.take() {
            stdin
                .write_all(payload.as_bytes())
                .await
                .map_err(|e| format!("Failed to stream stdin to {}: {}", program, e))?;
        }
    }
    child
        .wait_with_output()
        .await
        .map_err(|e| format!("Failed waiting for {} with args {:?}: {}", program, args, e))
}
/// Case-insensitive check for the MicroK8s "insufficient permissions" banner.
fn is_microk8s_permission_error(stderr: &str) -> bool {
    let lowered = stderr.to_ascii_lowercase();
    lowered.contains("insufficient permissions to access microk8s")
}
/// Detect `sudo -n` refusals that mean an interactive password prompt would
/// have been needed.
fn is_sudo_password_required(stderr: &str) -> bool {
    const MARKERS: [&str; 3] = ["a password is required", "no tty present", "askpass"];
    let lowered = stderr.to_ascii_lowercase();
    MARKERS.iter().any(|marker| lowered.contains(marker))
}
/// Render a failed raw `microk8s` invocation as an error string, adding a
/// remediation hint when stderr shows the group-permission failure.
fn format_microk8s_error(args: &[&str], stderr: &str) -> String {
    let permission_issue = stderr.contains("Insufficient permissions to access MicroK8s");
    if !permission_issue {
        return format!("microk8s {:?} failed: {}", args, stderr);
    }
    format!(
        "microk8s {:?} failed: {}\nHint: retry with sudo (e.g. `sudo microk8s {}`), or run `sudo usermod -a -G microk8s $USER`, `sudo chown -R $USER ~/.kube`, then `newgrp microk8s`.",
        args,
        stderr.trim(),
        args.join(" ")
    )
}
/// Run kubectl and parse its stdout as JSON.
async fn run_kubectl_json(
    base: &[&str],
    context: Option<&String>,
    namespace: Option<&String>,
) -> Result<Value, String> {
    let stdout = run_kubectl(base, context, namespace).await?;
    match serde_json::from_str(&stdout) {
        Ok(value) => Ok(value),
        Err(e) => Err(format!("Failed to parse kubectl JSON: {}", e)),
    }
}
/// `kubernetes check`: report CLI availability and, unless `offline` is set,
/// query the cluster for versions, current context, node readiness and the
/// pods in `namespace`.
async fn check_cluster(
    context: Option<String>,
    namespace: String,
    offline: bool,
    debug: bool,
) -> Result<(), String> {
    // Resolve the CLI first so a missing binary is logged before returning.
    let backend = ensure_kube_cli().map_err(|e| {
        let _ = log_error(COMMAND_NAME, "Kubernetes CLI missing", Some(&e));
        e
    })?;
    let _ = log_info(COMMAND_NAME, "Starting cluster check", None).await;
    println!(
        "{}",
        "Kubernetes feature flag enabled (experimental)".bright_blue()
    );
    println!(
        "{} {}",
        "Detected CLI:".bright_blue(),
        backend.command_label()
    );
    println!(
        "{} kubectl={} microk8s={}",
        "Available binaries:".bright_blue(),
        if command_exists("kubectl") {
            "yes"
        } else {
            "no"
        },
        if command_exists("microk8s") {
            "yes"
        } else {
            "no"
        }
    );
    // Offline mode stops after the binary checks — no cluster round-trips.
    if offline {
        println!(
            "{}",
            "Offline mode: verified Kubernetes CLI presence. Skipping live cluster calls.".yellow()
        );
        return Ok(());
    }
    let ctx_str = context.clone();
    // Client/server versions from `kubectl version --output=json`; missing
    // fields degrade to "unknown" instead of failing the check.
    let version = run_kubectl_json(&["version", "--output=json"], ctx_str.as_ref(), None).await?;
    let client = version["clientVersion"]["gitVersion"]
        .as_str()
        .unwrap_or("unknown");
    let server = version["serverVersion"]["gitVersion"]
        .as_str()
        .unwrap_or("unknown");
    let current_context =
        run_kubectl(&["config", "current-context"], ctx_str.as_ref(), None).await?;
    // Node-list parse errors degrade to an empty list rather than an error.
    let nodes: NodeList = serde_json::from_value(
        run_kubectl_json(&["get", "nodes", "-o", "json"], ctx_str.as_ref(), None).await?,
    )
    .unwrap_or(NodeList { items: vec![] });
    // A node counts as Ready when it has a `Ready` condition whose status is "True".
    let ready_nodes = nodes
        .items
        .iter()
        .filter(|n| {
            n.status
                .as_ref()
                .and_then(|s| s.conditions.as_ref())
                .and_then(|conds| {
                    conds.iter().find(|c| {
                        c.kind.as_deref() == Some("Ready") && c.status.as_deref() == Some("True")
                    })
                })
                .is_some()
        })
        .count();
    let node_total = nodes.items.len();
    println!("{} {}", "Context:".bright_blue(), current_context.trim());
    println!(
        "{} client={} server={}",
        "Versions:".bright_blue(),
        client,
        server
    );
    println!(
        "{} {}/{} Ready",
        "Nodes:".bright_blue(),
        ready_nodes,
        node_total
    );
    let ns_arg = Some(namespace.clone());
    // Pod listing is best-effort: an empty or missing namespace is not fatal.
    let pods = run_kubectl(
        &[
            "get",
            "pods",
            "--no-headers",
            "-o",
            "custom-columns=NAME:.metadata.name,STATUS:.status.phase",
        ],
        ctx_str.as_ref(),
        ns_arg.as_ref(),
    )
    .await
    .unwrap_or_else(|_| "No pods listed (namespace may be empty)".to_string());
    println!("{} {}", "Namespace:".bright_blue(), namespace);
    println!("{}", pods);
    let _ = log_success(COMMAND_NAME, "Cluster check complete", None).await;
    if debug {
        let _ = log_info(COMMAND_NAME, "Cluster check ran in debug mode", None).await;
    }
    Ok(())
}
/// `kubernetes generate`: render the manifest bundle and write it to `output`,
/// creating parent directories as needed.
async fn generate_bundle(
    name: String,
    image: String,
    port: u16,
    replicas: u16,
    namespace: String,
    output: String,
    host: Option<String>,
    _debug: bool,
) -> Result<(), String> {
    let _ = log_info(COMMAND_NAME, "Generating manifest bundle", Some(&name)).await;
    let manifest = render_manifest(&name, &image, port, replicas, &namespace, host.as_deref());
    let path = PathBuf::from(&output);
    // Make sure the target directory exists before writing the file.
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)
            .await
            .map_err(|e| format!("Failed to create directory {}: {}", parent.display(), e))?;
    }
    fs::write(&path, manifest)
        .await
        .map_err(|e| format!("Failed to write manifest {}: {}", path.display(), e))?;
    println!("{} {}", "Wrote manifest bundle to".green(), path.display());
    // The Ingress document is only emitted when a host was supplied.
    println!(
        "- Deployment, Service, ServiceAccount, Role/RoleBinding, NetworkPolicy{}",
        if host.is_some() { ", Ingress" } else { "" }
    );
    println!(
        "- Safe defaults: runAsNonRoot, seccomp=RuntimeDefault, probes, requests/limits, namespace-scoped RBAC"
    );
    let _ = log_success(COMMAND_NAME, "Manifest bundle generated", Some(&output)).await;
    Ok(())
}
/// `kubernetes apply`: run `kubectl apply -f <file>`, optionally as a
/// server-side dry run, after verifying the manifest exists on disk.
async fn apply_bundle(
    file: String,
    context: Option<String>,
    namespace: Option<String>,
    dry_run: bool,
    _debug: bool,
) -> Result<(), String> {
    let path = PathBuf::from(&file);
    if !path.exists() {
        return Err(format!("Manifest file not found: {}", file));
    }
    let file_str = path
        .to_str()
        .ok_or_else(|| "Invalid manifest path".to_string())?;
    let mut args: Vec<&str> = vec!["apply", "-f", file_str];
    if dry_run {
        args.push("--dry-run=server");
    }
    let output = run_kubectl(&args, context.as_ref(), namespace.as_ref()).await?;
    println!("{}", output);
    let _ = log_success(COMMAND_NAME, "kubectl apply completed", Some(&file)).await;
    Ok(())
}
/// `kubernetes status`: list deployments, services and pods in `namespace`.
async fn status(namespace: String, context: Option<String>, _debug: bool) -> Result<(), String> {
    let ns_filter = Some(namespace.clone());
    let listing = run_kubectl(
        &["get", "deploy,svc,pods", "-o", "wide"],
        context.as_ref(),
        ns_filter.as_ref(),
    )
    .await?;
    println!("{} {}", "Namespace:".bright_blue(), namespace);
    println!("{}", listing);
    let _ = log_info(COMMAND_NAME, "Rendered namespace status", None).await;
    Ok(())
}
/// `kubernetes addons`: list/enable/disable MicroK8s addons via the raw
/// `microk8s` binary.
async fn handle_addons(command: KubernetesAddonSubCommand, _debug: bool) -> Result<(), String> {
    let _ = log_info(COMMAND_NAME, "Managing MicroK8s addons", None).await;
    match command {
        KubernetesAddonSubCommand::List => {
            println!("{}", run_microk8s(&["status"]).await?);
            let _ = log_success(COMMAND_NAME, "Fetched MicroK8s addon status", None).await;
        }
        KubernetesAddonSubCommand::Enable { name } => {
            println!("{}", run_microk8s(&["enable", &name]).await?);
            let _ = log_success(COMMAND_NAME, "Enabled MicroK8s addon", Some(&name)).await;
        }
        KubernetesAddonSubCommand::Disable { name } => {
            println!("{}", run_microk8s(&["disable", &name]).await?);
            let _ = log_success(COMMAND_NAME, "Disabled MicroK8s addon", Some(&name)).await;
        }
    }
    Ok(())
}
/// `kubernetes dashboard-token`: describe the dashboard secret and print the
/// bearer token extracted from its `token:` field.
async fn dashboard_token(
    namespace: String,
    secret: String,
    context: Option<String>,
    _debug: bool,
) -> Result<(), String> {
    let _ = log_info(
        COMMAND_NAME,
        "Reading Kubernetes dashboard token",
        Some(&format!("{}/{}", namespace, secret)),
    )
    .await;
    let ns_filter = Some(namespace.clone());
    let describe_output = run_kubectl(
        &["describe", "secret", &secret],
        context.as_ref(),
        ns_filter.as_ref(),
    )
    .await?;
    match extract_secret_token_from_describe(&describe_output) {
        Some(token) => {
            println!("{}", token);
            let _ = log_success(COMMAND_NAME, "Dashboard token extracted", Some(&secret)).await;
            Ok(())
        }
        None => Err(format!(
            "Could not locate a `token:` field in secret describe output for {}/{}.",
            namespace, secret
        )),
    }
}
/// Scan `kubectl describe secret` output and return the first non-empty
/// `token:` value, with surrounding whitespace removed.
fn extract_secret_token_from_describe(output: &str) -> Option<String> {
    for line in output.lines() {
        let trimmed = line.trim();
        if !trimmed.starts_with("token:") {
            continue;
        }
        let value = trimmed.trim_start_matches("token:").trim();
        if !value.is_empty() {
            return Some(value.to_string());
        }
    }
    None
}
/// `kubernetes observability-creds`: read the Grafana admin credentials from a
/// Kubernetes secret via `kubectl get secret -o go-template`, where the
/// template itself performs the base64 decoding.
///
/// A missing or empty `admin-user` falls back to "admin"; a missing or empty
/// `admin-password` is a hard error.
async fn observability_creds(
    namespace: String,
    secret: String,
    context: Option<String>,
    _debug: bool,
) -> Result<(), String> {
    let _ = log_info(
        COMMAND_NAME,
        "Reading observability Grafana credentials",
        Some(&format!("{}/{}", namespace, secret)),
    )
    .await;
    let ns = Some(namespace.clone());
    // go-template output format: kubectl decodes the base64 secret data itself.
    let user_template = r#"go-template={{index .data "admin-user" | base64decode}}"#.to_string();
    let password_template =
        r#"go-template={{index .data "admin-password" | base64decode}}"#.to_string();
    let user_args = vec![
        "get".to_string(),
        "secret".to_string(),
        secret.clone(),
        "-o".to_string(),
        user_template,
    ];
    let password_args = vec![
        "get".to_string(),
        "secret".to_string(),
        secret.clone(),
        "-o".to_string(),
        password_template,
    ];
    // Username lookup is best-effort: any failure or empty value → "admin".
    let username = run_kubectl_owned(&user_args, context.as_ref(), ns.as_ref())
        .await
        .map(|value| value.trim().to_string())
        .ok()
        .filter(|value| !value.is_empty())
        .unwrap_or_else(|| "admin".to_string());
    // Password lookup is mandatory: extraction failures are surfaced.
    let password = run_kubectl_owned(&password_args, context.as_ref(), ns.as_ref())
        .await
        .map(|value| value.trim().to_string())
        .map_err(|e| {
            format!(
                "Unable to extract `admin-password` from secret {}/{}: {}",
                namespace, secret, e
            )
        })?;
    if password.is_empty() {
        return Err(format!(
            "Extracted empty `admin-password` from secret {}/{}.",
            namespace, secret
        ));
    }
    println!("username: {}", username);
    println!("password: {}", password);
    let _ = log_success(
        COMMAND_NAME,
        "Observability credentials extracted",
        Some(&secret),
    )
    .await;
    Ok(())
}
/// `kubernetes issuer`: create or update a cert-manager ACME `Issuer` in
/// `namespace`, after verifying that the cert-manager CRD is installed.
async fn create_issuer(
    email: String,
    name: String,
    namespace: String,
    server: String,
    private_key_secret: String,
    ingress_class_name: String,
    context: Option<String>,
    dry_run: bool,
    _debug: bool,
) -> Result<(), String> {
    let _ = log_info(
        COMMAND_NAME,
        "Creating cert-manager Issuer",
        Some(&format!("{} in {}", name, namespace)),
    )
    .await;
    let backend = ensure_kube_cli().map_err(|e| {
        let _ = log_error(COMMAND_NAME, "Kubernetes CLI missing", Some(&e));
        e
    })?;
    // Preflight: without the cert-manager CRD the apply below can never work,
    // so fail early with an actionable message.
    let _ = run_kubectl(&["get", "crd", "issuers.cert-manager.io"], context.as_ref(), None)
        .await
        .map_err(|e| {
            format!(
                "cert-manager CRD `issuers.cert-manager.io` not found. Install cert-manager before creating an Issuer. Details: {}",
                e
            )
        })?;
    let issuer_manifest = render_issuer_manifest(
        &email,
        &name,
        &namespace,
        &server,
        &private_key_secret,
        &ingress_class_name,
    );
    // Apply the rendered manifest from stdin (`kubectl apply -f -`).
    let mut apply_args = vec!["apply", "-f", "-"];
    if dry_run {
        apply_args.push("--dry-run=server");
    }
    let output = run_kubectl_with_stdin(
        &apply_args,
        context.as_ref(),
        Some(&namespace),
        &issuer_manifest,
    )
    .await?;
    println!("{}", output);
    println!(
        "{} {} (namespace: {}, cli: {})",
        "Issuer applied:".green(),
        name,
        namespace,
        backend.command_label()
    );
    let _ = log_success(COMMAND_NAME, "Issuer created/updated", Some(&name)).await;
    Ok(())
}
/// Render a multi-document YAML bundle — optional Namespace, ServiceAccount,
/// Role/RoleBinding, Deployment, Service, NetworkPolicy and (when `host` is
/// given) an Ingress — with hardened defaults for a single web workload.
///
/// Fixes over the previous version:
/// * `\"` escape sequences inside `r#"…"#` raw strings are NOT processed by
///   Rust, so the output literally contained `cpu: \"100m\"` — invalid YAML.
///   Quotes are now written plainly inside the raw strings.
/// * The shared label block is rendered at the indentation depth of each
///   insertion point (metadata vs. pod template), so both are valid YAML.
fn render_manifest(
    name: &str,
    image: &str,
    port: u16,
    replicas: u16,
    namespace: &str,
    host: Option<&str>,
) -> String {
    // Shared label set, one line per label, each prefixed with `indent` spaces.
    fn labels_at(name: &str, indent: usize) -> String {
        let pad = " ".repeat(indent);
        [
            format!("{pad}app.kubernetes.io/name: {name}"),
            format!("{pad}app.kubernetes.io/managed-by: xbp"),
            format!("{pad}xbp.dev/feature: kubernetes"),
            format!("{pad}xbp.dev/profile: experimental"),
        ]
        .join("\n")
    }
    // `metadata.labels` entries sit at indent 4; pod-template labels at 8.
    let meta_labels = labels_at(name, 4);
    let pod_labels = labels_at(name, 8);
    let mut docs: Vec<String> = Vec::new();
    // Only emit a Namespace document for non-default namespaces.
    if namespace != "default" {
        docs.push(format!(
            r#"apiVersion: v1
kind: Namespace
metadata:
  name: {namespace}
  labels:
{labels}
---
"#,
            namespace = namespace,
            labels = meta_labels
        ));
    }
    docs.push(format!(
        r#"apiVersion: v1
kind: ServiceAccount
metadata:
  name: {name}-sa
  namespace: {namespace}
  labels:
{labels}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {name}-reader
  namespace: {namespace}
  labels:
{labels}
rules:
  - apiGroups: [""]
    resources: ["pods", "services", "endpoints", "configmaps"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {name}-reader
  namespace: {namespace}
  labels:
{labels}
subjects:
  - kind: ServiceAccount
    name: {name}-sa
    namespace: {namespace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {name}-reader
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {name}
  namespace: {namespace}
  labels:
{labels}
spec:
  replicas: {replicas}
  selector:
    matchLabels:
      app.kubernetes.io/name: {name}
  template:
    metadata:
      labels:
{pod_labels}
      annotations:
        xbp.dev/generated-by: kubernetes-feature
    spec:
      serviceAccountName: {name}-sa
      securityContext:
        runAsNonRoot: true
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: {name}
          image: {image}
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: {port}
              name: http
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
          resources:
            requests:
              cpu: "100m"
              memory: "128Mi"
            limits:
              cpu: "500m"
              memory: "512Mi"
          livenessProbe:
            httpGet:
              path: /healthz
              port: http
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 2
          readinessProbe:
            httpGet:
              path: /readyz
              port: http
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 2
---
apiVersion: v1
kind: Service
metadata:
  name: {name}
  namespace: {namespace}
  labels:
{labels}
spec:
  selector:
    app.kubernetes.io/name: {name}
  ports:
    - name: http
      port: {port}
      targetPort: {port}
      protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {name}-default-deny
  namespace: {namespace}
  labels:
{labels}
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: {name}
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - podSelector: {{}}
      ports:
        - protocol: TCP
          port: {port}
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system
          podSelector:
            matchLabels:
              k8s-app: kube-dns
      ports:
        - protocol: UDP
          port: 53
    - to:
        - podSelector: {{}}
      ports:
        - protocol: TCP
          port: {port}
"#,
        name = name,
        image = image,
        port = port,
        replicas = replicas,
        namespace = namespace,
        labels = meta_labels,
        pod_labels = pod_labels
    ));
    if let Some(host) = host {
        docs.push(format!(
            r#"---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {name}
  namespace: {namespace}
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: 10m
  labels:
{labels}
spec:
  rules:
    - host: {host}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: {name}
                port:
                  number: {port}
"#,
            name = name,
            namespace = namespace,
            port = port,
            host = host,
            labels = meta_labels
        ));
    }
    docs.join("\n")
}
/// Render a cert-manager ACME `Issuer` manifest using the HTTP-01 solver
/// bound to `ingress_class_name`. YAML nesting is emitted with conventional
/// two-space indentation so the document parses as valid cert-manager config.
fn render_issuer_manifest(
    email: &str,
    name: &str,
    namespace: &str,
    server: &str,
    private_key_secret: &str,
    ingress_class_name: &str,
) -> String {
    format!(
        r#"apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: {name}
  namespace: {namespace}
spec:
  acme:
    email: {email}
    server: {server}
    privateKeySecretRef:
      name: {private_key_secret}
    solvers:
      - http01:
          ingress:
            ingressClassName: {ingress_class_name}
"#,
        email = email,
        name = name,
        namespace = namespace,
        server = server,
        private_key_secret = private_key_secret,
        ingress_class_name = ingress_class_name
    )
}