use super::config::KubernetesConfig;
use super::error::{KubernetesError, KubernetesResult};
use base64::Engine;
use k8s_openapi::api::apps::v1::Deployment;
use k8s_openapi::api::core::v1::{ConfigMap, Namespace, Node, Pod, Secret, Service};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use kube::config::{KubeConfigOptions, Kubeconfig};
use kube::{Api, Client, Config};
use regex::Regex;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::Semaphore;
use tokio::time::timeout;
use tokio_retry::Retry;
use tokio_retry::strategy::ExponentialBackoff;
/// Namespaces this manager refuses to delete; checked by `namespace_delete`
/// to guard against destroying cluster-critical namespaces.
const PROTECTED_NAMESPACES: &[&str] = &[
"default",
"kube-system",
"kube-public",
"kube-node-lease",
"kube-apiserver",
];
/// High-level, rate-limited manager for resources in a single Kubernetes
/// namespace. Cloning shares the client and rate-limiting state via `Arc`.
#[derive(Clone)]
pub struct KubernetesManager {
// Underlying kube API client.
client: Client,
// Namespace that all namespaced operations target.
namespace: String,
// Tunables: timeouts, retry policy, and rate-limit parameters.
config: KubernetesConfig,
// Bounds concurrent entry into `rate_limit` to `config.rate_limit_burst`.
rate_limiter: Arc<Semaphore>,
// Timestamp of the most recent request, used to space requests apart.
last_request: Arc<tokio::sync::Mutex<Instant>>,
}
impl KubernetesManager {
/// Creates a manager for `namespace` using the default configuration.
pub async fn new(namespace: impl Into<String>) -> KubernetesResult<Self> {
    let default_config = KubernetesConfig::default();
    Self::with_config(namespace, default_config).await
}
/// Creates a manager for `namespace` with an explicit configuration.
///
/// Builds a kube client, verifies the cluster is reachable, and wires up
/// the rate-limiting state before returning.
pub async fn with_config(
    namespace: impl Into<String>,
    config: KubernetesConfig,
) -> KubernetesResult<Self> {
    let client = Self::create_kube_client().await?;
    Self::validate_cluster_connectivity(&client).await?;
    Ok(Self {
        client,
        namespace: namespace.into(),
        rate_limiter: Arc::new(Semaphore::new(config.rate_limit_burst as usize)),
        last_request: Arc::new(tokio::sync::Mutex::new(Instant::now())),
        config,
    })
}
/// Builds a kube [`Client`], trying configuration sources in order:
/// inferred config (in-cluster or default kubeconfig), the k3s config,
/// the microk8s config, and finally any kubeconfig detected via kubectl.
///
/// # Errors
/// Returns a config error listing every attempted source when none of
/// them yields a working client.
async fn create_kube_client() -> KubernetesResult<Client> {
    if let Ok(k8s_config) = Config::infer().await {
        if let Ok(client) = Client::try_from(k8s_config) {
            return Ok(client);
        }
    }
    let k3s_path = "/etc/rancher/k3s/k3s.yaml";
    if std::path::Path::new(k3s_path).exists() {
        log::debug!("Trying k3s config at {}", k3s_path);
        if let Some(client) = Self::client_from_kubeconfig_path(k3s_path).await {
            log::info!("Using k3s kubeconfig from {}", k3s_path);
            return Ok(client);
        }
    }
    let microk8s_path = "/var/snap/microk8s/current/credentials/client.config";
    if std::path::Path::new(microk8s_path).exists() {
        log::debug!("Trying microk8s config at {}", microk8s_path);
        if let Some(client) = Self::client_from_kubeconfig_path(microk8s_path).await {
            log::info!("Using microk8s kubeconfig from {}", microk8s_path);
            return Ok(client);
        }
    }
    if let Some(kubeconfig_path) = Self::detect_kubeconfig_from_kubectl() {
        log::debug!("Detected kubeconfig at {} (kubectl works)", kubeconfig_path);
        if let Some(client) = Self::client_from_kubeconfig_path(&kubeconfig_path).await {
            log::info!(
                "Using kubeconfig from {} (detected via kubectl)",
                kubeconfig_path
            );
            return Ok(client);
        }
    }
    Err(KubernetesError::config_error(
        "❌ No Kubernetes cluster configuration found!\n\n\
        Tried:\n \
        - In-cluster config (not running inside Kubernetes)\n \
        - Kubeconfig file at ~/.kube/config (not found)\n \
        - KUBECONFIG environment variable (not set or invalid)\n \
        - k3s config at /etc/rancher/k3s/k3s.yaml\n \
        - microk8s config at /var/snap/microk8s/current/credentials/client.config\n \
        - Detection via kubectl\n\n\
        To fix this, either:\n \
        - Set the KUBECONFIG environment variable to point to your kubeconfig file\n \
        - Copy your kubeconfig to ~/.kube/config\n \
        - Ensure the kubeconfig file has correct permissions\n \
        - Run this command inside a Kubernetes cluster",
    ))
}
/// Loads a kubeconfig file and builds a client from it, returning `None`
/// on any failure along the way (read, parse, or client construction).
async fn client_from_kubeconfig_path(path: &str) -> Option<Client> {
    let kubeconfig = Kubeconfig::read_from(path).ok()?;
    let config = Config::from_custom_kubeconfig(kubeconfig, &KubeConfigOptions::default())
        .await
        .ok()?;
    Client::try_from(config).ok()
}
/// Heuristically locates a kubeconfig file when `kubectl` itself can see a
/// cluster.
///
/// Runs `kubectl config view` to confirm a working configuration exists,
/// then returns the first plausible kubeconfig path: `$KUBECONFIG`,
/// kubectl's default `~/.kube/config` (previously missed, even though the
/// error text in `create_kube_client` claims it is consulted), the k3s
/// config, or the microk8s config.
///
/// NOTE: this does not ask kubectl *which* file it actually used, so the
/// returned path is a best-effort guess.
fn detect_kubeconfig_from_kubectl() -> Option<String> {
    use std::process::Command;
    let output = Command::new("kubectl")
        .args([
            "config",
            "view",
            "--minify",
            "-o",
            "jsonpath={.clusters[0].name}",
        ])
        .output()
        .ok()?;
    if !output.status.success() {
        return None;
    }
    if let Ok(kubeconfig) = std::env::var("KUBECONFIG") {
        if std::path::Path::new(&kubeconfig).exists() {
            return Some(kubeconfig);
        }
    }
    // kubectl's default location when KUBECONFIG is unset.
    if let Ok(home) = std::env::var("HOME") {
        let default_path = std::path::Path::new(&home).join(".kube").join("config");
        if default_path.exists() {
            return Some(default_path.to_string_lossy().into_owned());
        }
    }
    let k3s_path = "/etc/rancher/k3s/k3s.yaml";
    if std::path::Path::new(k3s_path).exists() {
        return Some(k3s_path.to_string());
    }
    let microk8s_path = "/var/snap/microk8s/current/credentials/client.config";
    if std::path::Path::new(microk8s_path).exists() {
        return Some(microk8s_path.to_string());
    }
    None
}
/// Verifies the cluster behind `client` is reachable by querying the API
/// server version, translating common failures into friendlier errors.
async fn validate_cluster_connectivity(client: &Client) -> KubernetesResult<()> {
log::info!("🔍 Validating Kubernetes cluster connectivity...");
match client.apiserver_version().await {
Ok(version) => {
log::info!(
"✅ Connected to Kubernetes cluster (version: {})",
version.git_version
);
Ok(())
}
Err(e) => {
// Classify the failure by substring matching on the error text,
// since no structured cause is inspected here.
let error_msg = e.to_string();
if error_msg.contains("connection refused") {
Err(KubernetesError::config_error(
"❌ Kubernetes cluster is not reachable!\n\n\
The cluster appears to be down or unreachable.\n\
Try: `kubectl get nodes` to verify connectivity.\n\n\
If using minikube: `minikube start`\n\
If using kind: `kind create cluster`",
))
} else if error_msg.contains("Unauthorized") || error_msg.contains("Forbidden") {
Err(KubernetesError::permission_denied(
"❌ Access denied to Kubernetes cluster!\n\n\
You don't have permission to access this cluster.\n\
Check your kubeconfig and RBAC permissions.",
))
} else {
// Anything else (DNS, TLS, proxy, etc.) falls through to a
// generic config error carrying the raw message.
Err(KubernetesError::config_error(format!(
"❌ Failed to connect to Kubernetes cluster!\n\n\
Error: {error_msg}\n\n\
Please verify:\n\
1. Cluster is running: `kubectl get nodes`\n\
2. Network connectivity\n\
3. Authentication credentials"
)))
}
}
}
}
/// Returns the namespace this manager operates on.
pub fn namespace(&self) -> &str {
&self.namespace
}
/// Returns the underlying kube API client.
pub fn client(&self) -> &Client {
&self.client
}
/// Returns the configuration this manager was built with.
pub fn config(&self) -> &KubernetesConfig {
&self.config
}
/// Runs `operation` with rate limiting, a per-attempt timeout, and
/// exponential-backoff retries (`max_retries` attempts, delays between
/// `retry_base_delay` and `retry_max_delay`).
///
/// NOTE(review): `rate_limit` is applied once up front, not once per
/// retry attempt, so retries are only spaced by the backoff strategy.
/// NOTE(review): `tokio_retry::Retry` retries on *any* `Err`; errors
/// classified non-retryable below are rewrapped but still retried until
/// the strategy is exhausted. `tokio_retry::RetryIf` would stop early —
/// confirm intended semantics before changing.
async fn execute_with_safety<F, Fut, T>(&self, operation: F) -> KubernetesResult<T>
where
F: Fn() -> Fut + Send + Sync,
Fut: std::future::Future<Output = KubernetesResult<T>> + Send,
T: Send,
{
self.rate_limit().await?;
let retry_strategy =
ExponentialBackoff::from_millis(self.config.retry_base_delay.as_millis() as u64)
.max_delay(self.config.retry_max_delay)
.take(self.config.max_retries as usize);
let result = Retry::spawn(retry_strategy, || async {
// Each attempt is bounded by the configured operation timeout.
match timeout(self.config.operation_timeout, operation()).await {
Ok(result) => result.map_err(|e| {
match &e {
KubernetesError::ApiError(kube_err) => {
if is_retryable_error(kube_err) {
log::warn!("Retryable error encountered: {e}");
e
} else {
log::error!("Non-retryable error: {e}");
KubernetesError::operation_error(format!("Non-retryable: {e}"))
}
}
_ => {
log::warn!("Retrying operation due to error: {e}");
e
}
}
}),
Err(_) => {
// The future did not complete within `operation_timeout`.
let timeout_err = KubernetesError::timeout(format!(
"Operation timed out after {:?}",
self.config.operation_timeout
));
log::error!("Operation timeout: {:?}", self.config.operation_timeout);
Err(timeout_err)
}
}
})
.await;
result
}
/// Enforces client-side rate limiting before an API call.
///
/// Acquires a semaphore permit (bounding concurrent callers to
/// `rate_limit_burst`) and sleeps as needed so consecutive requests are
/// spaced at least `1000 / rate_limit_rps` ms apart. The permit is
/// released when this function returns, so it throttles entry into
/// operations rather than the operations themselves.
///
/// # Errors
/// Returns an operation error if the semaphore has been closed.
async fn rate_limit(&self) -> KubernetesResult<()> {
    let _permit = self
        .rate_limiter
        .acquire()
        .await
        .map_err(|_| KubernetesError::operation_error("Rate limiter semaphore closed"))?;
    let mut last_request = self.last_request.lock().await;
    let now = Instant::now();
    // Clamp rps to at least 1: a misconfigured value of 0 previously
    // panicked with a divide-by-zero.
    let rps = (self.config.rate_limit_rps as u64).max(1);
    let min_interval = Duration::from_millis(1000 / rps);
    if let Some(sleep_duration) = min_interval.checked_sub(now.duration_since(*last_request)) {
        tokio::time::sleep(sleep_duration).await;
    }
    *last_request = Instant::now();
    Ok(())
}
/// Lists all pods in this namespace, with rate limiting, timeout, and
/// retry applied via `execute_with_safety`.
pub async fn pods_list(&self) -> KubernetesResult<Vec<Pod>> {
    self.execute_with_safety(|| async {
        let api: Api<Pod> = Api::namespaced(self.client.clone(), &self.namespace);
        Ok(api.list(&Default::default()).await?.items)
    })
    .await
}
/// Lists all services in this namespace, with rate limiting, timeout, and
/// retry applied via `execute_with_safety`.
pub async fn services_list(&self) -> KubernetesResult<Vec<Service>> {
    self.execute_with_safety(|| async {
        let api: Api<Service> = Api::namespaced(self.client.clone(), &self.namespace);
        Ok(api.list(&Default::default()).await?.items)
    })
    .await
}
/// Lists all deployments in this namespace.
///
/// Now routed through `execute_with_safety` (rate limiting, timeout,
/// retry) for consistency with `pods_list` and `services_list`, which
/// this method previously bypassed.
pub async fn deployments_list(&self) -> KubernetesResult<Vec<Deployment>> {
    self.execute_with_safety(|| async {
        let deployments: Api<Deployment> = Api::namespaced(self.client.clone(), &self.namespace);
        let deployment_list = deployments.list(&Default::default()).await?;
        Ok(deployment_list.items)
    })
    .await
}
/// Lists all ConfigMaps in this namespace.
///
/// Now routed through `execute_with_safety` (rate limiting, timeout,
/// retry) for consistency with `pods_list` and `services_list`, which
/// this method previously bypassed.
pub async fn configmaps_list(&self) -> KubernetesResult<Vec<ConfigMap>> {
    self.execute_with_safety(|| async {
        let configmaps: Api<ConfigMap> = Api::namespaced(self.client.clone(), &self.namespace);
        let configmap_list = configmaps.list(&Default::default()).await?;
        Ok(configmap_list.items)
    })
    .await
}
/// Lists all Secrets in this namespace.
///
/// Now routed through `execute_with_safety` (rate limiting, timeout,
/// retry) for consistency with `pods_list` and `services_list`, which
/// this method previously bypassed.
pub async fn secrets_list(&self) -> KubernetesResult<Vec<Secret>> {
    self.execute_with_safety(|| async {
        let secrets: Api<Secret> = Api::namespaced(self.client.clone(), &self.namespace);
        let secret_list = secrets.list(&Default::default()).await?;
        Ok(secret_list.items)
    })
    .await
}
/// Creates a ConfigMap named `name` in this namespace holding `data`.
pub async fn configmap_create(
    &self,
    name: &str,
    data: HashMap<String, String>,
) -> KubernetesResult<ConfigMap> {
    let api: Api<ConfigMap> = Api::namespaced(self.client.clone(), &self.namespace);
    let metadata = ObjectMeta {
        name: Some(name.to_string()),
        namespace: Some(self.namespace.clone()),
        ..Default::default()
    };
    let body = ConfigMap {
        metadata,
        data: Some(data.into_iter().collect()),
        ..Default::default()
    };
    let created = api.create(&Default::default(), &body).await?;
    log::info!("Created ConfigMap '{name}'");
    Ok(created)
}
/// Fetches the ConfigMap named `name` from this namespace.
pub async fn configmap_get(&self, name: &str) -> KubernetesResult<ConfigMap> {
    let api: Api<ConfigMap> = Api::namespaced(self.client.clone(), &self.namespace);
    Ok(api.get(name).await?)
}
/// Creates a Secret named `name` with the given string `data`.
/// `secret_type` defaults to `"Opaque"` when `None`.
///
/// Bug fix: values were previously base64-encoded *before* being wrapped
/// in [`k8s_openapi::ByteString`], but `ByteString` already base64-encodes
/// its bytes during serialization, so stored values ended up
/// double-encoded. Raw bytes are now handed to `ByteString` directly.
pub async fn secret_create(
    &self,
    name: &str,
    data: HashMap<String, String>,
    secret_type: Option<&str>,
) -> KubernetesResult<Secret> {
    use k8s_openapi::ByteString;
    let secrets: Api<Secret> = Api::namespaced(self.client.clone(), &self.namespace);
    // ByteString performs the base64 encoding on serialization.
    let encoded_data: std::collections::BTreeMap<String, ByteString> = data
        .into_iter()
        .map(|(k, v)| (k, ByteString(v.into_bytes())))
        .collect();
    let secret = Secret {
        metadata: ObjectMeta {
            name: Some(name.to_string()),
            namespace: Some(self.namespace.clone()),
            ..Default::default()
        },
        data: Some(encoded_data),
        type_: Some(secret_type.unwrap_or("Opaque").to_string()),
        ..Default::default()
    };
    let created_secret = secrets.create(&Default::default(), &secret).await?;
    log::info!("Created Secret '{name}'");
    Ok(created_secret)
}
/// Fetches the Secret named `name` from this namespace.
pub async fn secret_get(&self, name: &str) -> KubernetesResult<Secret> {
    let api: Api<Secret> = Api::namespaced(self.client.clone(), &self.namespace);
    Ok(api.get(name).await?)
}
/// Creates the cluster-scoped namespace `name`, idempotently: an existing
/// namespace is treated as success. After creation the namespace is read
/// back to confirm it exists. Runs under `execute_with_safety`.
pub async fn namespace_create(&self, name: &str) -> KubernetesResult<()> {
// Own the name so the retryable closure can clone it per attempt.
let name = name.to_string(); self.execute_with_safety(move || {
let name = name.clone();
let client = self.client.clone();
async move {
let namespaces: Api<Namespace> = Api::all(client);
match namespaces.get(&name).await {
Ok(_) => {
log::info!("Namespace '{name}' already exists");
return Ok(());
}
// 404 means it doesn't exist yet — fall through and create it.
Err(kube::Error::Api(api_err)) if api_err.code == 404 => {
}
Err(e) => return Err(KubernetesError::ApiError(e)),
}
let namespace = Namespace {
metadata: k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta {
name: Some(name.clone()),
..Default::default()
},
..Default::default()
};
namespaces.create(&Default::default(), &namespace).await?;
log::info!("Created namespace '{name}'");
// Read-back verification: treat a create that cannot be observed
// as a failure so callers don't proceed against a missing namespace.
match namespaces.get(&name).await {
Ok(_) => {
log::info!("Verified namespace '{name}' exists");
Ok(())
}
Err(e) => {
log::error!("Failed to verify namespace '{name}' was created: {e}");
Err(KubernetesError::operation_error(format!(
"Namespace '{name}' was not created successfully. Verification failed: {e}"
)))
}
}
}
})
.await
}
pub async fn delete(&self, pattern: &str) -> KubernetesResult<usize> {
let regex = Regex::new(pattern)?;
log::warn!(
"🚨 DESTRUCTIVE OPERATION: Starting bulk deletion with pattern '{}' in namespace '{}'",
pattern,
self.namespace
);
let mut deleted_count = 0;
let mut failed_deletions = Vec::new();
match self.delete_pods_matching(®ex).await {
Ok(count) => deleted_count += count,
Err(e) => {
log::error!("Failed to delete pods matching pattern '{pattern}': {e}");
failed_deletions.push(format!("pods: {e}"));
}
}
match self.delete_services_matching(®ex).await {
Ok(count) => deleted_count += count,
Err(e) => {
log::error!("Failed to delete services matching pattern '{pattern}': {e}");
failed_deletions.push(format!("services: {e}"));
}
}
match self.delete_deployments_matching(®ex).await {
Ok(count) => deleted_count += count,
Err(e) => {
log::error!("Failed to delete deployments matching pattern '{pattern}': {e}");
failed_deletions.push(format!("deployments: {e}"));
}
}
match self.delete_configmaps_matching(®ex).await {
Ok(count) => deleted_count += count,
Err(e) => {
log::error!("Failed to delete configmaps matching pattern '{pattern}': {e}");
failed_deletions.push(format!("configmaps: {e}"));
}
}
match self.delete_secrets_matching(®ex).await {
Ok(count) => deleted_count += count,
Err(e) => {
log::error!("Failed to delete secrets matching pattern '{pattern}': {e}");
failed_deletions.push(format!("secrets: {e}"));
}
}
if !failed_deletions.is_empty() {
log::error!(
"Bulk deletion completed with {} successes and {} failures. Failed: [{}]",
deleted_count,
failed_deletions.len(),
failed_deletions.join(", ")
);
return Err(KubernetesError::operation_error(format!(
"Partial deletion failure: {} resources deleted, {} resource types failed: {}",
deleted_count,
failed_deletions.len(),
failed_deletions.join(", ")
)));
}
log::info!(
"✅ Successfully deleted {} resources matching pattern '{}' in namespace '{}'",
deleted_count,
pattern,
self.namespace
);
Ok(deleted_count)
}
async fn delete_pods_matching(&self, regex: &Regex) -> KubernetesResult<usize> {
let pods: Api<Pod> = Api::namespaced(self.client.clone(), &self.namespace);
let pod_list = pods.list(&Default::default()).await?;
let mut deleted = 0;
for pod in pod_list.items {
if let Some(name) = &pod.metadata.name {
if regex.is_match(name) {
match pods.delete(name, &Default::default()).await {
Ok(_) => {
log::info!("Deleted pod '{name}'");
deleted += 1;
}
Err(e) => {
log::error!("Failed to delete pod '{name}': {e}");
}
}
}
}
}
Ok(deleted)
}
async fn delete_services_matching(&self, regex: &Regex) -> KubernetesResult<usize> {
let services: Api<Service> = Api::namespaced(self.client.clone(), &self.namespace);
let service_list = services.list(&Default::default()).await?;
let mut deleted = 0;
for service in service_list.items {
if let Some(name) = &service.metadata.name {
if regex.is_match(name) {
match services.delete(name, &Default::default()).await {
Ok(_) => {
log::info!("Deleted service '{name}'");
deleted += 1;
}
Err(e) => {
log::error!("Failed to delete service '{name}': {e}");
}
}
}
}
}
Ok(deleted)
}
async fn delete_deployments_matching(&self, regex: &Regex) -> KubernetesResult<usize> {
let deployments: Api<Deployment> = Api::namespaced(self.client.clone(), &self.namespace);
let deployment_list = deployments.list(&Default::default()).await?;
let mut deleted = 0;
for deployment in deployment_list.items {
if let Some(name) = &deployment.metadata.name {
if regex.is_match(name) {
match deployments.delete(name, &Default::default()).await {
Ok(_) => {
log::info!("Deleted deployment '{name}'");
deleted += 1;
}
Err(e) => {
log::error!("Failed to delete deployment '{name}': {e}");
}
}
}
}
}
Ok(deleted)
}
async fn delete_configmaps_matching(&self, regex: &Regex) -> KubernetesResult<usize> {
let configmaps: Api<ConfigMap> = Api::namespaced(self.client.clone(), &self.namespace);
let configmap_list = configmaps.list(&Default::default()).await?;
let mut deleted = 0;
for configmap in configmap_list.items {
if let Some(name) = &configmap.metadata.name {
if regex.is_match(name) {
match configmaps.delete(name, &Default::default()).await {
Ok(_) => {
log::info!("Deleted configmap '{name}'");
deleted += 1;
}
Err(e) => {
log::error!("Failed to delete configmap '{name}': {e}");
}
}
}
}
}
Ok(deleted)
}
async fn delete_secrets_matching(&self, regex: &Regex) -> KubernetesResult<usize> {
let secrets: Api<Secret> = Api::namespaced(self.client.clone(), &self.namespace);
let secret_list = secrets.list(&Default::default()).await?;
let mut deleted = 0;
for secret in secret_list.items {
if let Some(name) = &secret.metadata.name {
if regex.is_match(name) {
match secrets.delete(name, &Default::default()).await {
Ok(_) => {
log::info!("Deleted secret '{name}'");
deleted += 1;
}
Err(e) => {
log::error!("Failed to delete secret '{name}': {e}");
}
}
}
}
}
Ok(deleted)
}
/// Creates a single-container pod named `name` running `image`.
///
/// `labels` become the pod's metadata labels; `env_vars` become the
/// container's environment variables.
pub async fn pod_create(
    &self,
    name: &str,
    image: &str,
    labels: Option<HashMap<String, String>>,
    env_vars: Option<HashMap<String, String>>,
) -> KubernetesResult<Pod> {
    use k8s_openapi::api::core::v1::{Container, EnvVar, PodSpec};
    let api: Api<Pod> = Api::namespaced(self.client.clone(), &self.namespace);
    let env = env_vars.map(|vars| {
        vars.into_iter()
            .map(|(key, value)| EnvVar {
                name: key,
                value: Some(value),
                ..Default::default()
            })
            .collect()
    });
    let container = Container {
        name: name.to_string(),
        image: Some(image.to_string()),
        env,
        ..Default::default()
    };
    let pod = Pod {
        metadata: ObjectMeta {
            name: Some(name.to_string()),
            namespace: Some(self.namespace.clone()),
            labels: labels.map(|l| l.into_iter().collect()),
            ..Default::default()
        },
        spec: Some(PodSpec {
            containers: vec![container],
            ..Default::default()
        }),
        ..Default::default()
    };
    let created_pod = api.create(&Default::default(), &pod).await?;
    log::info!("Created pod '{name}' with image '{image}'");
    Ok(created_pod)
}
/// Fetches the pod named `name` from this namespace.
pub async fn pod_get(&self, name: &str) -> KubernetesResult<Pod> {
    let api: Api<Pod> = Api::namespaced(self.client.clone(), &self.namespace);
    Ok(api.get(name).await?)
}
/// Creates a service named `name` selecting pods via `selector`.
///
/// The service listens on `port`; `target_port` defaults to `port`. No
/// explicit service type is set, so the cluster's default applies.
pub async fn service_create(
    &self,
    name: &str,
    selector: HashMap<String, String>,
    port: i32,
    target_port: Option<i32>,
) -> KubernetesResult<Service> {
    use k8s_openapi::api::core::v1::{ServicePort, ServiceSpec};
    use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
    let api: Api<Service> = Api::namespaced(self.client.clone(), &self.namespace);
    let service_port = ServicePort {
        port,
        target_port: Some(IntOrString::Int(target_port.unwrap_or(port))),
        ..Default::default()
    };
    let spec = ServiceSpec {
        selector: Some(selector.into_iter().collect()),
        ports: Some(vec![service_port]),
        ..Default::default()
    };
    let service = Service {
        metadata: ObjectMeta {
            name: Some(name.to_string()),
            namespace: Some(self.namespace.clone()),
            ..Default::default()
        },
        spec: Some(spec),
        ..Default::default()
    };
    let created_service = api.create(&Default::default(), &service).await?;
    log::info!("Created service '{name}' on port {port}");
    Ok(created_service)
}
/// Creates a `LoadBalancer` service named `name` selecting pods via
/// `selector`. The service listens on `port`; `target_port` defaults to
/// `port`.
pub async fn service_create_lb(
    &self,
    name: &str,
    selector: HashMap<String, String>,
    port: i32,
    target_port: Option<i32>,
) -> KubernetesResult<Service> {
    use k8s_openapi::api::core::v1::{ServicePort, ServiceSpec};
    use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
    let api: Api<Service> = Api::namespaced(self.client.clone(), &self.namespace);
    let service_port = ServicePort {
        port,
        target_port: Some(IntOrString::Int(target_port.unwrap_or(port))),
        ..Default::default()
    };
    let spec = ServiceSpec {
        type_: Some("LoadBalancer".to_string()),
        selector: Some(selector.into_iter().collect()),
        ports: Some(vec![service_port]),
        ..Default::default()
    };
    let service = Service {
        metadata: ObjectMeta {
            name: Some(name.to_string()),
            namespace: Some(self.namespace.clone()),
            ..Default::default()
        },
        spec: Some(spec),
        ..Default::default()
    };
    let created_service = api.create(&Default::default(), &service).await?;
    log::info!("Created LoadBalancer service '{name}' on port {port}");
    Ok(created_service)
}
/// Fetches the service named `name` from this namespace.
pub async fn service_get(&self, name: &str) -> KubernetesResult<Service> {
    let api: Api<Service> = Api::namespaced(self.client.clone(), &self.namespace);
    Ok(api.get(name).await?)
}
/// Creates a deployment named `name` running `replicas` copies of `image`.
///
/// `labels` (defaulting to `app: <name>`) are used for the deployment's
/// own labels, its selector, and the pod template, so the selector always
/// matches the template. `env_vars` become container environment
/// variables.
///
/// Cleanup: removed a redundant `labels_btree.clone()` (the value was
/// used exactly once) and the extra `selector_labels.clone()` round-trip
/// through `HashMap` before collecting into the label maps.
pub async fn deployment_create(
    &self,
    name: &str,
    image: &str,
    replicas: i32,
    labels: Option<HashMap<String, String>>,
    env_vars: Option<HashMap<String, String>>,
) -> KubernetesResult<Deployment> {
    use k8s_openapi::api::apps::v1::DeploymentSpec;
    use k8s_openapi::api::core::v1::{Container, EnvVar, PodSpec, PodTemplateSpec};
    use k8s_openapi::apimachinery::pkg::apis::meta::v1::LabelSelector;
    let deployments: Api<Deployment> = Api::namespaced(self.client.clone(), &self.namespace);
    // Labels on the Deployment object itself (None when not provided).
    let labels_btree = labels
        .as_ref()
        .map(|l| l.iter().map(|(k, v)| (k.clone(), v.clone())).collect());
    // Labels shared by the selector and the pod template.
    let selector_labels: std::collections::BTreeMap<String, String> = labels
        .unwrap_or_else(|| {
            let mut default_labels = HashMap::new();
            default_labels.insert("app".to_string(), name.to_string());
            default_labels
        })
        .into_iter()
        .collect();
    let env = env_vars.map(|vars| {
        vars.into_iter()
            .map(|(key, value)| EnvVar {
                name: key,
                value: Some(value),
                ..Default::default()
            })
            .collect()
    });
    let container = Container {
        name: name.to_string(),
        image: Some(image.to_string()),
        env,
        ..Default::default()
    };
    let deployment = Deployment {
        metadata: ObjectMeta {
            name: Some(name.to_string()),
            namespace: Some(self.namespace.clone()),
            labels: labels_btree,
            ..Default::default()
        },
        spec: Some(DeploymentSpec {
            replicas: Some(replicas),
            selector: LabelSelector {
                match_labels: Some(selector_labels.clone()),
                ..Default::default()
            },
            template: PodTemplateSpec {
                metadata: Some(ObjectMeta {
                    labels: Some(selector_labels),
                    ..Default::default()
                }),
                spec: Some(PodSpec {
                    containers: vec![container],
                    ..Default::default()
                }),
            },
            ..Default::default()
        }),
        ..Default::default()
    };
    let created_deployment = deployments.create(&Default::default(), &deployment).await?;
    log::info!("Created deployment '{name}' with {replicas} replicas using image '{image}'");
    Ok(created_deployment)
}
/// Fetches the deployment named `name` from this namespace.
pub async fn deployment_get(&self, name: &str) -> KubernetesResult<Deployment> {
    let api: Api<Deployment> = Api::namespaced(self.client.clone(), &self.namespace);
    Ok(api.get(name).await?)
}
/// Deletes the pod named `name`. A missing pod (404) counts as success,
/// making the call idempotent.
pub async fn pod_delete(&self, name: &str) -> KubernetesResult<()> {
    let api: Api<Pod> = Api::namespaced(self.client.clone(), &self.namespace);
    let outcome = api.delete(name, &Default::default()).await;
    match outcome {
        Err(kube::Error::Api(api_err)) if api_err.code == 404 => {
            log::info!("Pod '{name}' does not exist, nothing to delete");
            Ok(())
        }
        Ok(_) => {
            log::info!("Deleted pod '{name}'");
            Ok(())
        }
        Err(e) => Err(KubernetesError::ApiError(e)),
    }
}
/// Deletes the service named `name`. A missing service (404) counts as
/// success, making the call idempotent.
pub async fn service_delete(&self, name: &str) -> KubernetesResult<()> {
    let api: Api<Service> = Api::namespaced(self.client.clone(), &self.namespace);
    let outcome = api.delete(name, &Default::default()).await;
    match outcome {
        Err(kube::Error::Api(api_err)) if api_err.code == 404 => {
            log::info!("Service '{name}' does not exist, nothing to delete");
            Ok(())
        }
        Ok(_) => {
            log::info!("Deleted service '{name}'");
            Ok(())
        }
        Err(e) => Err(KubernetesError::ApiError(e)),
    }
}
/// Deletes the deployment named `name`. A missing deployment (404) counts
/// as success, making the call idempotent.
pub async fn deployment_delete(&self, name: &str) -> KubernetesResult<()> {
    let api: Api<Deployment> = Api::namespaced(self.client.clone(), &self.namespace);
    let outcome = api.delete(name, &Default::default()).await;
    match outcome {
        Err(kube::Error::Api(api_err)) if api_err.code == 404 => {
            log::info!("Deployment '{name}' does not exist, nothing to delete");
            Ok(())
        }
        Ok(_) => {
            log::info!("Deleted deployment '{name}'");
            Ok(())
        }
        Err(e) => Err(KubernetesError::ApiError(e)),
    }
}
/// Deletes the ConfigMap named `name`. A missing ConfigMap (404) counts
/// as success, making the call idempotent.
pub async fn configmap_delete(&self, name: &str) -> KubernetesResult<()> {
    let api: Api<ConfigMap> = Api::namespaced(self.client.clone(), &self.namespace);
    let outcome = api.delete(name, &Default::default()).await;
    match outcome {
        Err(kube::Error::Api(api_err)) if api_err.code == 404 => {
            log::info!("ConfigMap '{name}' does not exist, nothing to delete");
            Ok(())
        }
        Ok(_) => {
            log::info!("Deleted ConfigMap '{name}'");
            Ok(())
        }
        Err(e) => Err(KubernetesError::ApiError(e)),
    }
}
/// Deletes the Secret named `name`. A missing Secret (404) counts as
/// success, making the call idempotent.
pub async fn secret_delete(&self, name: &str) -> KubernetesResult<()> {
    let api: Api<Secret> = Api::namespaced(self.client.clone(), &self.namespace);
    let outcome = api.delete(name, &Default::default()).await;
    match outcome {
        Err(kube::Error::Api(api_err)) if api_err.code == 404 => {
            log::info!("Secret '{name}' does not exist, nothing to delete");
            Ok(())
        }
        Ok(_) => {
            log::info!("Deleted Secret '{name}'");
            Ok(())
        }
        Err(e) => Err(KubernetesError::ApiError(e)),
    }
}
/// Counts pods, services, deployments, configmaps, and secrets in this
/// namespace, keyed by resource kind name.
pub async fn resource_counts(&self) -> KubernetesResult<HashMap<String, usize>> {
    let mut counts = HashMap::with_capacity(5);
    counts.insert("pods".to_string(), self.pods_list().await?.len());
    counts.insert("services".to_string(), self.services_list().await?.len());
    counts.insert("deployments".to_string(), self.deployments_list().await?.len());
    counts.insert("configmaps".to_string(), self.configmaps_list().await?.len());
    counts.insert("secrets".to_string(), self.secrets_list().await?.len());
    Ok(counts)
}
/// Returns whether a namespace named `name` exists in the cluster.
pub async fn namespace_exists(&self, name: &str) -> KubernetesResult<bool> {
    let lookup = Api::<Namespace>::all(self.client.clone()).get(name).await;
    match lookup {
        Ok(_) => Ok(true),
        Err(kube::Error::Api(api_err)) if api_err.code == 404 => Ok(false),
        Err(e) => Err(KubernetesError::ApiError(e)),
    }
}
/// Lists every namespace in the cluster.
pub async fn namespaces_list(&self) -> KubernetesResult<Vec<Namespace>> {
    let api = Api::<Namespace>::all(self.client.clone());
    Ok(api.list(&Default::default()).await?.items)
}
/// Deletes the namespace `name` and everything in it.
///
/// Refuses to touch namespaces in `PROTECTED_NAMESPACES`. A missing
/// namespace counts as success (idempotent). All workload and config
/// resources are explicitly removed before the namespace itself.
pub async fn namespace_delete(&self, name: &str) -> KubernetesResult<()> {
    if PROTECTED_NAMESPACES.contains(&name) {
        return Err(KubernetesError::operation_error(format!(
            "Cannot delete protected namespace '{}'. Protected namespaces: {}",
            name,
            PROTECTED_NAMESPACES.join(", ")
        )));
    }
    let namespaces: Api<Namespace> = Api::all(self.client.clone());
    // Short-circuit when the namespace is already gone.
    match namespaces.get(name).await {
        Ok(_) => {}
        Err(kube::Error::Api(api_err)) if api_err.code == 404 => {
            log::info!("Namespace '{name}' does not exist, nothing to delete");
            return Ok(());
        }
        Err(e) => return Err(KubernetesError::ApiError(e)),
    }
    log::warn!("🚨 DESTRUCTIVE OPERATION: Deleting namespace '{name}' and ALL its resources!");
    self.delete_all_resources_in_namespace(name).await?;
    match namespaces.delete(name, &Default::default()).await {
        Err(kube::Error::Api(api_err)) if api_err.code == 404 => {
            log::info!("Namespace '{name}' already deleted");
            Ok(())
        }
        Ok(_) => {
            log::info!("Deleted namespace '{name}'");
            Ok(())
        }
        Err(e) => Err(KubernetesError::ApiError(e)),
    }
}
/// Best-effort bulk removal of common resource kinds in `namespace`,
/// run before deleting the namespace itself so teardown is prompt.
///
/// Each kind is listed and deleted independently; list failures and
/// individual delete failures are deliberately ignored (`let _ =`), so
/// this only ever returns `Ok(())`.
async fn delete_all_resources_in_namespace(&self, namespace: &str) -> KubernetesResult<()> {
use k8s_openapi::api::apps::v1::{DaemonSet, ReplicaSet, StatefulSet};
use k8s_openapi::api::batch::v1::{CronJob, Job};
use k8s_openapi::api::core::v1::PersistentVolumeClaim;
use kube::api::DeleteParams;
let delete_params = DeleteParams::default();
// Workload controllers first, then bare pods, then config/storage.
let deployments: Api<Deployment> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = deployments.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting deployment '{name}' in namespace '{namespace}'");
let _ = deployments.delete(&name, &delete_params).await;
}
}
}
let statefulsets: Api<StatefulSet> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = statefulsets.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting statefulset '{name}' in namespace '{namespace}'");
let _ = statefulsets.delete(&name, &delete_params).await;
}
}
}
let daemonsets: Api<DaemonSet> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = daemonsets.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting daemonset '{name}' in namespace '{namespace}'");
let _ = daemonsets.delete(&name, &delete_params).await;
}
}
}
let replicasets: Api<ReplicaSet> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = replicasets.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting replicaset '{name}' in namespace '{namespace}'");
let _ = replicasets.delete(&name, &delete_params).await;
}
}
}
let jobs: Api<Job> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = jobs.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting job '{name}' in namespace '{namespace}'");
let _ = jobs.delete(&name, &delete_params).await;
}
}
}
let cronjobs: Api<CronJob> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = cronjobs.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting cronjob '{name}' in namespace '{namespace}'");
let _ = cronjobs.delete(&name, &delete_params).await;
}
}
}
let pods: Api<Pod> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = pods.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting pod '{name}' in namespace '{namespace}'");
let _ = pods.delete(&name, &delete_params).await;
}
}
}
let services: Api<Service> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = services.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting service '{name}' in namespace '{namespace}'");
let _ = services.delete(&name, &delete_params).await;
}
}
}
let configmaps: Api<ConfigMap> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = configmaps.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting configmap '{name}' in namespace '{namespace}'");
let _ = configmaps.delete(&name, &delete_params).await;
}
}
}
let secrets: Api<Secret> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = secrets.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
// Skip service-account token secrets ("default-token-*") —
// presumably left for the namespace deletion itself to reap.
if name.starts_with("default-token-") {
continue;
}
log::info!("Deleting secret '{name}' in namespace '{namespace}'");
let _ = secrets.delete(&name, &delete_params).await;
}
}
}
let pvcs: Api<PersistentVolumeClaim> = Api::namespaced(self.client.clone(), namespace);
if let Ok(list) = pvcs.list(&Default::default()).await {
for item in list.items {
if let Some(name) = item.metadata.name {
log::info!("Deleting PVC '{name}' in namespace '{namespace}'");
let _ = pvcs.delete(&name, &delete_params).await;
}
}
}
log::info!("Finished deleting all resources in namespace '{namespace}'");
Ok(())
}
/// Deletes namespace `name`, waits (up to 60s, polling every 500ms) for
/// Kubernetes to finish removing it, then recreates it empty.
pub async fn namespace_reset(&self, name: &str) -> KubernetesResult<()> {
log::info!("Resetting namespace '{name}'...");
self.namespace_delete(name).await?;
let namespaces: Api<Namespace> = Api::all(self.client.clone());
let max_wait = std::time::Duration::from_secs(60);
let poll_interval = std::time::Duration::from_millis(500);
let start = std::time::Instant::now();
// Poll until the namespace GET returns 404 (fully deleted) or we time out.
loop {
match namespaces.get(name).await {
Ok(ns) => {
let phase = ns.status.as_ref().and_then(|s| s.phase.as_deref());
if phase == Some("Terminating") {
log::info!("Namespace '{name}' is terminating, waiting...");
} else {
log::info!("Namespace '{name}' still exists, waiting for deletion...");
}
// Timeout is only checked while the namespace still exists;
// a 404 always exits the loop promptly.
if start.elapsed() > max_wait {
return Err(KubernetesError::timeout(format!(
"Timeout waiting for namespace '{name}' to be deleted after {:?}",
max_wait
)));
}
tokio::time::sleep(poll_interval).await;
}
Err(kube::Error::Api(api_err)) if api_err.code == 404 => {
log::info!("Namespace '{name}' has been deleted");
break;
}
Err(e) => return Err(KubernetesError::ApiError(e)),
}
}
self.namespace_create(name).await?;
log::info!("Namespace '{name}' has been reset successfully");
Ok(())
}
/// Deploys an application: a deployment of `replicas` pods running
/// `image`, fronted by a LoadBalancer service on `port`.
///
/// The service selects the same labels the deployment applies, falling
/// back to the deployment's implicit `app: <name>` default.
pub async fn deploy_application(
    &self,
    name: &str,
    image: &str,
    replicas: i32,
    port: i32,
    labels: Option<HashMap<String, String>>,
    env_vars: Option<HashMap<String, String>>,
) -> KubernetesResult<()> {
    log::info!("Deploying application '{name}' with image '{image}'");
    self.deployment_create(name, image, replicas, labels.clone(), env_vars)
        .await?;
    let selector = labels.unwrap_or_else(|| {
        let mut default_selector = HashMap::new();
        default_selector.insert("app".to_string(), name.to_string());
        default_selector
    });
    self.service_create_lb(name, selector, port, Some(port))
        .await?;
    log::info!("Successfully deployed application '{name}'");
    Ok(())
}
}
/// Connection endpoints resolved for a single Kubernetes Service.
#[derive(Debug, Clone)]
pub struct ServiceConnectionInfo {
    pub name: String,
    pub namespace: String,
    pub hostname: String,
    pub host_hostname: Option<String>,
    pub ports: Vec<i32>,
    pub node_ports: Vec<i32>,
    pub cluster_ip: Option<String>,
    pub external_ip: Option<String>,
    pub service_type: String,
}
impl ServiceConnectionInfo {
    /// In-cluster endpoint as `host:port` using the first service port,
    /// or the bare hostname when the service exposes no ports.
    pub fn connection_string(&self) -> String {
        match self.ports.first() {
            Some(port) => format!("{}:{}", self.hostname, port),
            None => self.hostname.clone(),
        }
    }
    /// Multi-line, human-readable summary; optional fields are only
    /// printed when present.
    pub fn display(&self) -> String {
        // Renders a port list as "a, b, c".
        fn csv(values: &[i32]) -> String {
            values
                .iter()
                .map(i32::to_string)
                .collect::<Vec<_>>()
                .join(", ")
        }
        let mut out = String::new();
        out.push_str(&format!("Service: {}\n", self.name));
        out.push_str(&format!("Namespace: {}\n", self.namespace));
        out.push_str(&format!("Type: {}\n", self.service_type));
        out.push_str(&format!("Hostname (in-cluster): {}\n", self.hostname));
        if let Some(host) = self.host_hostname.as_deref() {
            out.push_str(&format!("Hostname (from host): {}\n", host));
        }
        if !self.ports.is_empty() {
            out.push_str(&format!("Ports: {}\n", csv(&self.ports)));
        }
        if !self.node_ports.is_empty() {
            out.push_str(&format!("NodePorts: {}\n", csv(&self.node_ports)));
        }
        if let Some(ip) = self.cluster_ip.as_deref() {
            out.push_str(&format!("ClusterIP: {}\n", ip));
        }
        if let Some(ip) = self.external_ip.as_deref() {
            out.push_str(&format!("ExternalIP: {}\n", ip));
        }
        out
    }
    /// Hostname reachable from the developer's host machine, falling
    /// back to the in-cluster DNS name when no host alias is known.
    pub fn host_accessible_hostname(&self) -> &str {
        match self.host_hostname {
            Some(ref host) => host,
            None => &self.hostname,
        }
    }
    /// `localhost:<nodePort>` for the first NodePort, if the service
    /// exposes any.
    pub fn localhost_connection_string(&self) -> Option<String> {
        let port = self.node_ports.first()?;
        Some(format!("localhost:{}", port))
    }
}
impl KubernetesManager {
    /// Resolves all known connection endpoints for `service_name` in this
    /// manager's namespace into a [`ServiceConnectionInfo`].
    ///
    /// Missing spec fields degrade gracefully: no ports yields empty
    /// lists, and the service type defaults to `ClusterIP`.
    pub async fn service_connection_info(
        &self,
        service_name: &str,
    ) -> KubernetesResult<ServiceConnectionInfo> {
        let service = self.service_get(service_name).await?;
        let namespace = service
            .metadata
            .namespace
            .clone()
            .unwrap_or_else(|| self.namespace.clone());
        let name = service
            .metadata
            .name
            .clone()
            .unwrap_or_else(|| service_name.to_string());
        let hostname = format!("{}.{}.svc.cluster.local", name, namespace);
        let host_hostname = self.detect_host_hostname(&name, &namespace).await;
        let spec = service.spec.as_ref();
        let mut ports = Vec::new();
        let mut node_ports = Vec::new();
        if let Some(svc_ports) = spec.and_then(|s| s.ports.as_ref()) {
            for p in svc_ports {
                ports.push(p.port);
                if let Some(np) = p.node_port {
                    node_ports.push(np);
                }
            }
        }
        // A headless service reports the literal string "None"; treat it
        // as no ClusterIP at all.
        let cluster_ip = spec
            .and_then(|s| s.cluster_ip.clone())
            .filter(|ip| ip != "None");
        // NOTE(review): only the `ip` of the first LoadBalancer ingress is
        // considered; an ingress exposing a `hostname` instead is ignored —
        // confirm whether that is intended.
        let external_ip = service
            .status
            .as_ref()
            .and_then(|status| status.load_balancer.as_ref())
            .and_then(|lb| lb.ingress.as_ref())
            .and_then(|ingress| ingress.first())
            .and_then(|entry| entry.ip.clone());
        let service_type = spec
            .and_then(|s| s.type_.clone())
            .unwrap_or_else(|| "ClusterIP".to_string());
        Ok(ServiceConnectionInfo {
            name,
            namespace,
            hostname,
            host_hostname,
            ports,
            node_ports,
            cluster_ip,
            external_ip,
            service_type,
        })
    }
    /// Returns the hostname a process on the host machine can use, when
    /// one is known. Currently only OrbStack is detected, which exposes
    /// services as `<svc>.<ns>.k8s.orb.local`.
    async fn detect_host_hostname(&self, service_name: &str, namespace: &str) -> Option<String> {
        if !self.is_orbstack_environment().await {
            return None;
        }
        Some(format!("{}.{}.k8s.orb.local", service_name, namespace))
    }
    /// Heuristic OrbStack detection: true when any node's name, label
    /// key/value, or annotation key/value contains "orbstack". List
    /// failures are treated as "not OrbStack".
    async fn is_orbstack_environment(&self) -> bool {
        let nodes: Api<Node> = Api::all(self.client.clone());
        let node_list = match nodes.list(&Default::default()).await {
            Ok(list) => list,
            Err(_) => return false,
        };
        for node in &node_list.items {
            let meta = &node.metadata;
            if meta
                .name
                .as_deref()
                .map_or(false, |n| n.contains("orbstack"))
            {
                return true;
            }
            let mentions_orbstack = |map: &std::collections::BTreeMap<String, String>| {
                map.iter()
                    .any(|(k, v)| k.contains("orbstack") || v.contains("orbstack"))
            };
            if meta.labels.as_ref().map_or(false, mentions_orbstack) {
                return true;
            }
            if meta.annotations.as_ref().map_or(false, mentions_orbstack) {
                return true;
            }
        }
        false
    }
}
pub fn is_retryable_error(error: &kube::Error) -> bool {
match error {
kube::Error::HttpError(_) => true,
kube::Error::Api(api_error) => {
match api_error.code {
500..=599 => true,
429 => true,
409 => true,
400..=499 => false,
_ => true,
}
}
kube::Error::Auth(_) => false,
kube::Error::Discovery(_) => true,
_ => true,
}
}