commonware_deployer/aws/mod.rs
//! AWS EC2 deployer
//!
//! Deploy a custom binary (and configuration) to any number of EC2 instances across multiple regions. View metrics and logs
//! from all instances with Grafana.
//!
//! # Features
//!
//! * Automated creation, update, and destruction of EC2 instances across multiple regions
//! * Provide a unique name, instance type, region, binary, and configuration for each deployed instance
//! * Collect metrics, profiles (when enabled), and logs from all deployed instances on a long-lived monitoring instance
//!   (accessible only to the deployer's IP)
//!
//! # Architecture
//!
//! ```txt
//!                   Deployer's Machine (Public IP)
//!                                  |
//!                                  |
//!                                  v
//!                +-----------------------------------+
//!                |    Monitoring VPC (us-east-1)     |
//!                |  - Monitoring Instance            |
//!                |    - Prometheus                   |
//!                |    - Loki                         |
//!                |    - Pyroscope                    |
//!                |    - Tempo                        |
//!                |    - Grafana                      |
//!                |  - Security Group                 |
//!                |    - All: Deployer IP             |
//!                |    - 3100: Binary VPCs            |
//!                |    - 4040: Binary VPCs            |
//!                |    - 4318: Binary VPCs            |
//!                +-----------------------------------+
//!                    ^                       ^
//!               (Telemetry)             (Telemetry)
//!                    |                       |
//!                    |                       |
//! +------------------------------+  +------------------------------+
//! |         Binary VPC 1         |  |         Binary VPC 2         |
//! |  - Binary Instance           |  |  - Binary Instance           |
//! |    - Binary A                |  |    - Binary B                |
//! |    - Promtail                |  |    - Promtail                |
//! |    - Node Exporter           |  |    - Node Exporter           |
//! |    - Pyroscope Agent         |  |    - Pyroscope Agent         |
//! |  - Security Group            |  |  - Security Group            |
//! |    - All: Deployer IP        |  |    - All: Deployer IP        |
//! |    - 9090: Monitoring IP     |  |    - 9090: Monitoring IP     |
//! |    - 9100: Monitoring IP     |  |    - 9100: Monitoring IP     |
//! |    - 8012: 0.0.0.0/0         |  |    - 8765: 12.3.7.9/32       |
//! +------------------------------+  +------------------------------+
//! ```
//!
//! ## Instances
//!
//! ### Monitoring
//!
//! * Deployed in `us-east-1` with a configurable instance type (e.g., `t4g.small` for ARM64, `t3.small` for x86_64) and storage (e.g., 10GB gp2). Architecture is auto-detected from the instance type.
//! * Runs:
//!   * **Prometheus**: Scrapes binary metrics from all instances at `:9090` and system metrics from all instances at `:9100`.
//!   * **Loki**: Listens at `:3100`, storing logs in `/loki/chunks` with a TSDB index at `/loki/index`.
//!   * **Pyroscope**: Listens at `:4040`, storing profiles in `/var/lib/pyroscope`.
//!   * **Tempo**: Listens at `:4318`, storing traces in `/var/lib/tempo`.
//!   * **Grafana**: Hosted at `:3000`, provisioned with Prometheus, Loki, and Tempo datasources and a custom dashboard.
//! * Ingress:
//!   * Allows deployer IP access (TCP 0-65535).
//!   * Allows binary instance traffic to Loki (TCP 3100), Pyroscope (TCP 4040), and Tempo (TCP 4318).
//!
//! ### Binary
//!
//! * Deployed in user-specified regions with configurable ARM64 or AMD64 instance types and storage.
//! * Run:
//!   * **Custom Binary**: Executes with `--hosts=/home/ubuntu/hosts.yaml --config=/home/ubuntu/config.conf`, exposing metrics at `:9090`.
//!   * **Promtail**: Forwards `/var/log/binary.log` to Loki on the monitoring instance.
//!   * **Node Exporter**: Exposes system metrics at `:9100`.
//!   * **Pyroscope Agent**: Forwards `perf` profiles to Pyroscope on the monitoring instance.
//! * Ingress:
//!   * Deployer IP access (TCP 0-65535).
//!   * Monitoring IP access to `:9090` and `:9100` for Prometheus.
//!   * User-defined ports from the configuration.
//!
//! ## Networking
//!
//! ### VPCs
//!
//! One per region with CIDR `10.<region-index>.0.0/16` (e.g., `10.0.0.0/16` for `us-east-1`).
//!
//! ### Subnets
//!
//! One subnet per availability zone that supports any required instance type in the region
//! (e.g., `10.<region-index>.<az-index>.0/24`), linked to a shared route table with an internet gateway.
//! Each instance is placed in an AZ that supports its instance type, distributed round-robin across
//! eligible AZs, with automatic fallback to other AZs on capacity errors.
//!
//! ### VPC Peering
//!
//! Connects the monitoring VPC to each binary VPC, with routes added to route tables for private communication.
//!
//! ### Security Groups
//!
//! Separate security groups are created for the monitoring instance (named `{tag}`) and binary instances (named `{tag}-binary`), dynamically configured for deployer and inter-instance traffic.
//!
//! # Workflow
//!
//! ## `aws create`
//!
//! 1. Validates configuration and generates an SSH key pair, stored in `$HOME/.commonware_deployer/{tag}/id_rsa_{tag}`.
//! 2. Persists deployment metadata (tag, creation time, regions, instance count) to `$HOME/.commonware_deployer/{tag}/metadata.yaml`.
//!    This enables `destroy --tag` cleanup if creation fails.
//! 3. Ensures the shared S3 bucket exists and caches observability tools (Prometheus, Grafana, Loki, etc.) if not already present.
//! 4. Uploads deployment-specific files (binaries, configs) to S3.
//! 5. Creates VPCs, subnets, internet gateways, route tables, and security groups per region (concurrently).
//! 6. Establishes VPC peering between the monitoring region and binary regions.
//! 7. Launches the monitoring instance.
//! 8. Launches binary instances.
//! 9. Caches all static config files and uploads per-instance configs (hosts.yaml, promtail, pyroscope) to S3.
//! 10. Configures monitoring and binary instances in parallel via SSH (BBR, service installation, service startup).
//! 11. Updates the monitoring security group to allow telemetry traffic from binary instances.
//! 12. Marks completion with `$HOME/.commonware_deployer/{tag}/created`.
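//!
//! For example, assuming the CLI is invoked as `deployer` (as in the `aws profile` example below)
//! and that `aws create` accepts the same `--config` flag as the other subcommands, a typical
//! invocation looks like:
//!
//! ```bash
//! deployer aws create --config config.yaml
//! ```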
//!
//! ## `aws update`
//!
//! Performs rolling updates across all binary instances:
//!
//! 1. Uploads the latest binary and configuration to S3.
//! 2. For each instance (up to `--concurrency` at a time, default 128):
//!    a. Stops the `binary` service.
//!    b. Downloads the updated files from S3 via pre-signed URLs.
//!    c. Restarts the `binary` service.
//!    d. Waits for the service to become active before proceeding.
//!
//! _Use `--concurrency 1` for fully sequential updates that wait for each instance to be healthy
//! before updating the next._
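//!
//! For example, a fully sequential rollout (assuming `aws update` accepts the same `--config`
//! flag as the `aws profile` command shown below):
//!
//! ```bash
//! deployer aws update --config config.yaml --concurrency 1
//! ```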
//!
//! ## `aws authorize`
//!
//! 1. Obtains the deployer's current public IP address (or parses the one provided).
//! 2. For each security group in the deployment, adds an ingress rule for the IP (if it doesn't already exist).
//!
//! ## `aws destroy`
//!
//! Can be invoked with either `--config <path>` or `--tag <tag>`. When using `--tag`, the command
//! reads regions from the persisted `metadata.yaml` file, allowing destruction without the original
//! config file.
//!
//! 1. Terminates all instances across regions.
//! 2. Deletes security groups, subnets, route tables, VPC peering connections, internet gateways, key pairs, and VPCs in dependency order.
//! 3. Deletes deployment-specific data from S3 (cached tools remain for future deployments).
//! 4. Marks destruction with `$HOME/.commonware_deployer/{tag}/destroyed`, retaining the directory to prevent tag reuse.
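//!
//! For example, either of the following tears down the deployment from the example configuration
//! at the end of this page (the `--tag` form works even without the original config file):
//!
//! ```bash
//! deployer aws destroy --config config.yaml
//! deployer aws destroy --tag ffa638a0-991c-442c-8ec4-aa4e418213a5
//! ```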
//!
//! ## `aws clean`
//!
//! Deletes the shared S3 bucket and all of its contents (cached tools and any remaining deployment
//! data). Use this to fully clean up when you no longer need the deployer cache.
//!
//! ## `aws list`
//!
//! Lists all active deployments (created but not destroyed). For each deployment, displays the tag,
//! creation timestamp, regions, and number of instances.
//!
//! ## `aws profile`
//!
//! 1. Loads the deployment configuration and locates the specified instance.
//! 2. Caches the samply binary in S3 if not already present.
//! 3. SSHes to the instance, downloads samply, and records a CPU profile of the running binary for the specified duration.
//! 4. Downloads the profile locally via SCP.
//! 5. Opens Firefox Profiler with symbols resolved from your local debug binary.
//!
//! # Profiling
//!
//! The deployer supports two profiling modes:
//!
//! ## Continuous Profiling (Pyroscope)
//!
//! Enable continuous CPU profiling by setting `profiling: true` in your instance config. This runs
//! Pyroscope in the background, continuously collecting profiles that are viewable in the Grafana
//! dashboard on the monitoring instance.
//!
//! For best results, build and deploy your binary with debug symbols and frame pointers:
//!
//! ```bash
//! CARGO_PROFILE_RELEASE_DEBUG=true RUSTFLAGS="-C force-frame-pointers=yes" cargo build --release
//! ```
//!
//! ## On-Demand Profiling (samply)
//!
//! To generate an on-demand CPU profile (viewable in the Firefox Profiler UI), run the
//! following:
//!
//! ```bash
//! deployer aws profile --config config.yaml --instance <name> --binary <path-to-binary-with-debug>
//! ```
//!
//! This captures a 30-second profile (configurable with `--duration`) using samply on the remote
//! instance, downloads it, and opens it in Firefox Profiler. Unlike Continuous Profiling, this mode
//! does not require deploying a binary with debug symbols (reducing deployment time).
//!
//! As above, build your binary with debug symbols (frame pointers are not required for this mode):
//!
//! ```bash
//! CARGO_PROFILE_RELEASE_DEBUG=true cargo build --release
//! ```
//!
//! Now, strip symbols and deploy via `aws create` (preserve the original binary for profile symbolication
//! when you run the `aws profile` command shown above):
//!
//! ```bash
//! cp target/release/my-binary target/release/my-binary-debug
//! strip target/release/my-binary
//! ```
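//!
//! Putting the on-demand workflow together (a sketch; `node1` and `config.yaml` are placeholder
//! names, and `aws create` is assumed to accept the same `--config` flag as `aws profile`):
//!
//! ```bash
//! # Deploy the stripped binary referenced by config.yaml
//! deployer aws create --config config.yaml
//!
//! # Later, capture a profile and symbolicate it against the preserved debug binary
//! deployer aws profile --config config.yaml --instance node1 --binary target/release/my-binary-debug
//! ```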
//!
//! # Persistence
//!
//! * A directory `$HOME/.commonware_deployer/{tag}` stores:
//!   * SSH private key (`id_rsa_{tag}`)
//!   * Deployment metadata (`metadata.yaml`) containing the tag, creation timestamp, regions, and instance count
//!   * Status files (`created`, `destroyed`)
//! * The deployment state is tracked via these files, ensuring operations respect prior create/destroy actions.
//! * The `metadata.yaml` file enables `aws destroy --tag` and `aws list` to work without the original config file.
//!
//! ## S3 Caching
//!
//! A shared S3 bucket (`commonware-deployer-cache`) is used to cache deployment artifacts. The bucket
//! uses a fixed name intentionally so that all users within the same AWS account share the cache. This
//! design provides two benefits:
//!
//! 1. **Faster deployments**: Observability tools (Prometheus, Grafana, Loki, etc.) are downloaded from
//!    upstream sources once and cached in S3. Subsequent deployments by any user skip the download and
//!    use pre-signed URLs to fetch directly from S3.
//!
//! 2. **Reduced bandwidth**: Instead of requiring the deployer to push binaries to each instance,
//!    unique binaries are uploaded once to S3 and then pulled from there.
//!
//! Per-deployment data (binaries, configs, hosts files) is isolated under `deployments/{tag}/` to prevent
//! conflicts between concurrent deployments.
//!
//! The bucket stores:
//!
//! * `tools/binaries/{tool}/{version}/{platform}/{filename}` - Tool binaries (e.g., prometheus, grafana)
//! * `tools/configs/{deployer-version}/{component}/{file}` - Static configs and service files
//! * `deployments/{tag}/` - Deployment-specific files:
//!   * `monitoring/` - Prometheus config, dashboard
//!   * `instances/{name}/` - Binary, config, hosts.yaml, promtail config, pyroscope script
//!
//! Tool binaries are namespaced by tool version and platform. Static configs are namespaced by deployer
//! version to ensure cache invalidation when the deployer is updated.
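//!
//! To inspect what has been cached, the bucket can be browsed with the standard AWS CLI (assuming
//! credentials for the same AWS account; the tag below is the one from the example configuration):
//!
//! ```bash
//! aws s3 ls s3://commonware-deployer-cache/tools/ --recursive
//! aws s3 ls s3://commonware-deployer-cache/deployments/ffa638a0-991c-442c-8ec4-aa4e418213a5/ --recursive
//! ```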
//!
//! # Example Configuration
//!
//! ```yaml
//! tag: ffa638a0-991c-442c-8ec4-aa4e418213a5
//! monitoring:
//!   instance_type: t4g.small # ARM64 (Graviton)
//!   storage_size: 10
//!   storage_class: gp2
//!   dashboard: /path/to/dashboard.json
//! instances:
//!   - name: node1
//!     region: us-east-1
//!     instance_type: t4g.small # ARM64 (Graviton)
//!     storage_size: 10
//!     storage_class: gp2
//!     binary: /path/to/binary-arm64
//!     config: /path/to/config.conf
//!     profiling: true
//!   - name: node2
//!     region: us-west-2
//!     instance_type: t3.small # x86_64 (Intel/AMD)
//!     storage_size: 10
//!     storage_class: gp2
//!     binary: /path/to/binary-x86
//!     config: /path/to/config2.conf
//!     profiling: false
//! ports:
//!   - protocol: tcp
//!     port: 4545
//!     cidr: 0.0.0.0/0
//! ```

use serde::{Deserialize, Serialize};
use std::net::IpAddr;

cfg_if::cfg_if! {
    if #[cfg(feature = "aws")] {
        use std::path::PathBuf;
        use thiserror::Error;

        /// CPU architecture for EC2 instances
        #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
        pub enum Architecture {
            Arm64,
            X86_64,
        }

        impl Architecture {
            /// Returns the architecture string used in AMI names, download URLs, and labels
            pub const fn as_str(&self) -> &'static str {
                match self {
                    Self::Arm64 => "arm64",
                    Self::X86_64 => "amd64",
                }
            }

            /// Returns the Linux library path component for jemalloc
            pub const fn linux_lib(&self) -> &'static str {
                match self {
                    Self::Arm64 => "aarch64-linux-gnu",
                    Self::X86_64 => "x86_64-linux-gnu",
                }
            }
        }

        impl std::fmt::Display for Architecture {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                f.write_str(self.as_str())
            }
        }

        /// Metadata persisted during deployment creation
        #[derive(Serialize, Deserialize)]
        pub struct Metadata {
            pub tag: String,
            pub created_at: u64,
            pub regions: Vec<String>,
            pub instance_count: usize,
        }

        mod create;
        pub mod ec2;
        pub mod services;
        pub use create::create;
        mod update;
        pub use update::update;
        mod authorize;
        pub use authorize::authorize;
        mod destroy;
        pub use destroy::destroy;
        mod clean;
        pub use clean::clean;
        mod profile;
        pub use profile::profile;
        mod list;
        pub use list::list;
        pub mod s3;
        pub mod utils;

        /// Name of the monitoring instance
        const MONITORING_NAME: &str = "monitoring";

        /// AWS region where monitoring instances are deployed
        const MONITORING_REGION: &str = "us-east-1";

        /// File name that indicates the deployment completed
        const CREATED_FILE_NAME: &str = "created";

        /// File name that indicates the deployment was destroyed
        const DESTROYED_FILE_NAME: &str = "destroyed";

        /// File name for deployment metadata
        const METADATA_FILE_NAME: &str = "metadata.yaml";

        /// Port on instance where system metrics are exposed
        const SYSTEM_PORT: u16 = 9100;

        /// Port on monitoring where logs are pushed
        const LOGS_PORT: u16 = 3100;

        /// Port on monitoring where profiles are pushed
        const PROFILES_PORT: u16 = 4040;

        /// Port on monitoring where traces are pushed
        const TRACES_PORT: u16 = 4318;

        /// Maximum instances to manipulate at one time
        pub const DEFAULT_CONCURRENCY: &str = "128";

        /// Subcommand name
        pub const CMD: &str = "aws";

        /// Create subcommand name
        pub const CREATE_CMD: &str = "create";

        /// Update subcommand name
        pub const UPDATE_CMD: &str = "update";

        /// Authorize subcommand name
        pub const AUTHORIZE_CMD: &str = "authorize";

        /// Destroy subcommand name
        pub const DESTROY_CMD: &str = "destroy";

        /// Clean subcommand name
        pub const CLEAN_CMD: &str = "clean";

        /// Profile subcommand name
        pub const PROFILE_CMD: &str = "profile";

        /// List subcommand name
        pub const LIST_CMD: &str = "list";

        /// Directory where deployer files are stored
        fn deployer_directory(tag: Option<&str>) -> PathBuf {
            let base_dir = std::env::var("HOME").expect("$HOME is not configured");
            let path = PathBuf::from(base_dir).join(".commonware_deployer");
            match tag {
                Some(tag) => path.join(tag),
                None => path,
            }
        }

        /// S3 operations that can fail
        #[derive(Debug, Clone, Copy)]
        pub enum S3Operation {
            CreateBucket,
            DeleteBucket,
            HeadObject,
            ListObjects,
            DeleteObjects,
        }

        /// Reasons why accessing a bucket may be forbidden
        #[derive(Debug, Clone, Copy)]
        pub enum BucketForbiddenReason {
            /// Access denied (missing s3:ListBucket permission or bucket owned by another account)
            AccessDenied,
        }

        impl std::fmt::Display for BucketForbiddenReason {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                match self {
                    Self::AccessDenied => write!(
                        f,
                        "access denied (check IAM permissions or bucket ownership)"
                    ),
                }
            }
        }

        impl std::fmt::Display for S3Operation {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                match self {
                    Self::CreateBucket => write!(f, "CreateBucket"),
                    Self::DeleteBucket => write!(f, "DeleteBucket"),
                    Self::HeadObject => write!(f, "HeadObject"),
                    Self::ListObjects => write!(f, "ListObjects"),
                    Self::DeleteObjects => write!(f, "DeleteObjects"),
                }
            }
        }

        /// Errors that can occur when deploying infrastructure on AWS
        #[derive(Error, Debug)]
        pub enum Error {
            #[error("AWS EC2 error: {0}")]
            AwsEc2(#[from] aws_sdk_ec2::Error),
            #[error("AWS security group ingress error: {0}")]
            AwsSecurityGroupIngress(#[from] aws_sdk_ec2::operation::authorize_security_group_ingress::AuthorizeSecurityGroupIngressError),
            #[error("AWS describe instances error: {0}")]
            AwsDescribeInstances(
                #[from] aws_sdk_ec2::operation::describe_instances::DescribeInstancesError,
            ),
            #[error("S3 operation failed: {operation} on bucket '{bucket}'")]
            AwsS3 {
                bucket: String,
                operation: S3Operation,
                #[source]
                source: Box<aws_sdk_s3::Error>,
            },
            #[error("S3 bucket '{bucket}' forbidden: {reason}")]
            S3BucketForbidden {
                bucket: String,
                reason: BucketForbiddenReason,
            },
            #[error("IO error: {0}")]
            Io(#[from] std::io::Error),
            #[error("YAML error: {0}")]
            Yaml(#[from] serde_yaml::Error),
            #[error("creation already attempted")]
            CreationAttempted,
            #[error("invalid instance name: {0}")]
            InvalidInstanceName(String),
            #[error("reqwest error: {0}")]
            Reqwest(#[from] reqwest::Error),
            #[error("SSH failed")]
            SshFailed,
            #[error("keygen failed")]
            KeygenFailed,
            #[error("service timeout({0}): {1}")]
            ServiceTimeout(String, String),
            #[error("deployment does not exist: {0}")]
            DeploymentDoesNotExist(String),
            #[error("deployment is not complete: {0}")]
            DeploymentNotComplete(String),
            #[error("deployment already destroyed: {0}")]
            DeploymentAlreadyDestroyed(String),
            #[error("private key not found")]
            PrivateKeyNotFound,
            #[error("invalid IP address: {0}")]
            IpAddrParse(#[from] std::net::AddrParseError),
            #[error("IP address is not IPv4: {0}")]
            IpAddrNotV4(std::net::IpAddr),
            #[error("download failed: {0}")]
            DownloadFailed(String),
            #[error("S3 presigning config error: {0}")]
            S3PresigningConfig(#[from] aws_sdk_s3::presigning::PresigningConfigError),
            #[error("S3 presigning failed: {0}")]
            S3PresigningFailed(
                Box<aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::get_object::GetObjectError>>,
            ),
            #[error("S3 builder error: {0}")]
            S3Builder(#[from] aws_sdk_s3::error::BuildError),
            #[error("duplicate instance name: {0}")]
            DuplicateInstanceName(String),
            #[error("instance not found: {0}")]
            InstanceNotFound(String),
            #[error("symbolication failed: {0}")]
            Symbolication(String),
            #[error("no subnet supports instance type: {0}")]
            UnsupportedInstanceType(String),
            #[error("no subnets available")]
            NoSubnetsAvailable,
            #[error("metadata not found for deployment: {0}")]
            MetadataNotFound(String),
            #[error("must specify either --config or --tag")]
            MissingTagOrConfig,
            #[error("regions not enabled: {0:?}")]
            RegionsNotEnabled(Vec<String>),
        }

        impl From<aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::get_object::GetObjectError>>
            for Error
        {
            fn from(
                err: aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::get_object::GetObjectError>,
            ) -> Self {
                Self::S3PresigningFailed(Box::new(err))
            }
        }
    }
}

/// Port on binary where metrics are exposed
pub const METRICS_PORT: u16 = 9090;

/// Host deployment information
#[derive(Serialize, Deserialize, Clone)]
pub struct Host {
    /// Name of the host
    pub name: String,

    /// Region where the host is deployed
    pub region: String,

    /// Public IP address of the host
    pub ip: IpAddr,
}

/// List of hosts
#[derive(Serialize, Deserialize, Clone)]
pub struct Hosts {
    /// Private IP address of the monitoring instance
    pub monitoring: IpAddr,

    /// Hosts deployed across all regions
    pub hosts: Vec<Host>,
}

/// Port configuration
#[derive(Serialize, Deserialize, Clone)]
pub struct PortConfig {
    /// Protocol (e.g., "tcp")
    pub protocol: String,

    /// Port number
    pub port: u16,

    /// CIDR block
    pub cidr: String,
}

/// Instance configuration
#[derive(Serialize, Deserialize, Clone)]
pub struct InstanceConfig {
    /// Name of the instance
    pub name: String,

    /// AWS region where the instance is deployed
    pub region: String,

    /// Instance type (e.g., `t4g.small` for ARM64, `t3.small` for x86_64)
    pub instance_type: String,

    /// Storage size in GB
    pub storage_size: i32,

    /// Storage class (e.g., "gp2")
    pub storage_class: String,

    /// Path to the binary to deploy
    pub binary: String,

    /// Path to the binary configuration file
    pub config: String,

    /// Whether to enable profiling
    pub profiling: bool,
}

/// Monitoring configuration
#[derive(Serialize, Deserialize, Clone)]
pub struct MonitoringConfig {
    /// Instance type (e.g., `t4g.small` for ARM64, `t3.small` for x86_64)
    pub instance_type: String,

    /// Storage size in GB
    pub storage_size: i32,

    /// Storage class (e.g., "gp2")
    pub storage_class: String,

    /// Path to a custom dashboard file that is automatically
    /// uploaded to grafana
    pub dashboard: String,
}

/// Deployer configuration
#[derive(Serialize, Deserialize, Clone)]
pub struct Config {
    /// Unique tag for the deployment
    pub tag: String,

    /// Monitoring instance configuration
    pub monitoring: MonitoringConfig,

    /// Instance configurations
    pub instances: Vec<InstanceConfig>,

    /// Ports open on all instances
    pub ports: Vec<PortConfig>,
}