use super::profiles::{HpcDetection, HpcPartition, HpcProfile};
/// Profile describing LLNL's Dane cluster: its detection rule, charge
/// factors, partition list, and reference metadata.
pub fn dane_profile() -> HpcProfile {
    // Dane is recognized by the site-provided `CLUSTER` environment variable.
    let detection = vec![HpcDetection::EnvVar {
        name: "CLUSTER".to_string(),
        value: "dane".to_string(),
    }];

    // Reference links and ownership info; target collection type is inferred
    // from the `metadata` field of `HpcProfile`.
    let metadata = [
        (
            "documentation".to_string(),
            "https://hpc.llnl.gov/hardware/compute-platforms/dane".to_string(),
        ),
        ("organization".to_string(), "LLNL".to_string()),
    ]
    .into_iter()
    .collect();

    HpcProfile {
        name: "dane".to_string(),
        display_name: "LLNL Dane".to_string(),
        description: "Lawrence Livermore National Laboratory's Dane cluster".to_string(),
        detection,
        default_account: None,
        partitions: dane_partitions(),
        // CPU and GPU time are both charged at face value on Dane.
        charge_factor_cpu: 1.0,
        charge_factor_gpu: 1.0,
        metadata,
    }
}
/// Partition definitions for the Dane cluster.
///
/// Every Dane partition runs on identical nodes (112 CPUs, 257_054 MB of
/// memory, no GPUs, no local-disk or node-count limits recorded here); the
/// partitions differ only in name, description, walltime limit, sharing
/// policy, whether they must be requested explicitly, and feature tags.
/// The shared hardware values are factored into a single local constructor
/// so a hardware change is made in one place.
fn dane_partitions() -> Vec<HpcPartition> {
    // Local constructor for a Dane partition. Written as a closure so the
    // numeric field types (walltime, etc.) are inferred directly from the
    // corresponding `HpcPartition` fields.
    let partition = |name: &str,
                     description: &str,
                     max_walltime_secs,
                     shared,
                     requires_explicit_request,
                     features: Vec<String>| HpcPartition {
        name: name.to_string(),
        description: description.to_string(),
        cpus_per_node: 112,
        memory_mb: 257_054,
        max_walltime_secs,
        max_nodes: None,
        max_nodes_per_user: None,
        min_nodes: None,
        gpus_per_node: None,
        gpu_type: None,
        gpu_memory_gb: None,
        local_disk_gb: None,
        shared,
        requires_explicit_request,
        default_qos: None,
        features,
    };

    vec![
        partition(
            "pdebug",
            "Debug partition for developing and testing jobs",
            3600, // 1 hour
            false,
            true,
            vec!["debug".to_string()],
        ),
        // The only partition jobs land on without an explicit request.
        partition(
            "pbatch",
            "Default batch partition for standard jobs (max 1 day)",
            24 * 3600,
            false,
            false,
            vec![],
        ),
        partition(
            "pci",
            "CI partition for continuous integration jobs",
            24 * 3600,
            false,
            true,
            vec!["ci".to_string()],
        ),
        partition(
            "pserial",
            "Serial partition for long-running jobs (max 7 days)",
            7 * 24 * 3600,
            false,
            true,
            vec!["serial".to_string()],
        ),
        // Only shared (non-exclusive) partition in the list.
        partition(
            "pjupyter",
            "Partition for Jupyter notebook sessions",
            24 * 3600,
            true,
            true,
            vec!["jupyter".to_string()],
        ),
        partition(
            "pall",
            "Special partition with unlimited walltime (requires special access)",
            365 * 24 * 3600, // effectively unlimited: one year
            false,
            true,
            vec!["special".to_string()],
        ),
    ]
}