// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

//! Utility for bundling target binaries as tarfiles.

use anyhow::{anyhow, Result};
use serde_derive::Deserialize;
use std::fs::{File, OpenOptions};
use std::path::{Path, PathBuf};
use tar::Builder;
use tokio::io::{AsyncSeekExt, AsyncWriteExt};

// URL of the S3 bucket from which blobs are downloaded.
const S3_BUCKET: &str = "https://oxide-omicron-build.s3.amazonaws.com";
// Name for the directory component where downloaded blobs are stored.
const BLOB: &str = "blob";

// Downloads "source" from S3_BUCKET to "destination".
async fn download(source: &str, destination: &Path) -> Result<()> {
    println!(
        "Downloading {} to {}",
        source,
        destination.to_string_lossy()
    );
    let response = reqwest::get(format!("{}/{}", S3_BUCKET, source)).await?;
    let mut file = tokio::fs::File::create(destination).await?;
    file.write_all(&response.bytes().await?).await?;
    Ok(())
}

// Helper to open a tarfile for reading/writing.
fn open_tarfile(tarfile: &Path) -> Result<File> {
    OpenOptions::new()
        .write(true)
        .read(true)
        .truncate(true)
        .create(true)
        .open(&tarfile)
        .map_err(|err| anyhow!("Cannot create tarfile: {}", err))
}

// Returns the path as it should be placed within an archive, by
// prepending "root/".
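// For example, on a Unix-like host, "/opt/oxide/foo" becomes "root/opt/oxide/foo".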
fn archive_path(path: &Path) -> Result<PathBuf> {
    let leading_slash = std::path::MAIN_SEPARATOR.to_string();
    Ok(Path::new("root").join(&path.strip_prefix(leading_slash)?))
}

// Adds all parent directories of a path to the archive.
//
// For example, if we wanted to insert the file into the archive:
//
// - /opt/oxide/foo/bar.txt
//
// We could call the following:
//
// ```
// let path = Path::new("/opt/oxide/foo/bar.txt");
// add_directory_and_parents(&mut archive, path.parent().unwrap());
// ```
//
// Which would add the following directories to the archive:
//
// - root
// - root/opt
// - root/opt/oxide
// - root/opt/oxide/foo
fn add_directory_and_parents<W: std::io::Write>(
    archive: &mut tar::Builder<W>,
    to: &Path,
) -> Result<()> {
    let mut parents: Vec<&Path> = to.ancestors().collect();
    parents.reverse();

    for parent in parents {
        let dst = archive_path(&parent)?;
        archive.append_dir(&dst, ".")?;
    }

    Ok(())
}

/// A single package.
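///
/// As an illustrative sketch only (assuming the package manifest is TOML; all
/// field values here are hypothetical), an entry deserializing into this
/// struct might look like:
///
/// ```toml
/// service_name = "nexus"
/// zone = true
///
/// [rust]
/// binary_names = ["nexus"]
/// release = true
///
/// [[paths]]
/// from = "smf/nexus"
/// to = "/var/svc/manifest/site/nexus"
/// ```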
#[derive(Deserialize, Debug)]
pub struct Package {
    /// The name of the service to be used on the target OS.
    pub service_name: String,

    /// A list of blobs from the Omicron build S3 bucket which should be placed
    /// within this package.
    pub blobs: Option<Vec<PathBuf>>,

    /// Configuration for packages containing Rust binaries.
    pub rust: Option<RustPackage>,

    /// A set of mapped paths which appear within the archive.
    #[serde(default)]
    pub paths: Vec<MappedPath>,

    /// Identifies whether the package should be packaged into a zone image.
    pub zone: bool,
}

impl Package {
    pub fn get_output_path(&self, output_directory: &Path) -> PathBuf {
        if self.zone {
            output_directory.join(format!("{}.tar.gz", self.service_name))
        } else {
            output_directory.join(format!("{}.tar", self.service_name))
        }
    }

    /// Constructs the package file in the output directory.
    pub async fn create(&self, output_directory: &Path) -> Result<File> {
        if self.zone {
            self.create_zone_package(output_directory).await
        } else {
            self.create_tarball_package(output_directory).await
        }
    }

    // Adds blobs from S3 to the package.
    //
    // - `archive`: The archive to add the blobs into
    // - `download_directory`: The location to which the blobs should be downloaded
    // - `destination_path`: The destination path of the blobs within the archive
    async fn add_blobs<W: std::io::Write>(
        &self,
        archive: &mut Builder<W>,
        download_directory: &Path,
        destination_path: &Path,
    ) -> Result<()> {
        if let Some(blobs) = &self.blobs {
            let blobs_path = download_directory.join(&self.service_name);
            std::fs::create_dir_all(&blobs_path)?;
            for blob in blobs {
                let blob_path = blobs_path.join(blob);
                // TODO: Check against hash, download if mismatch (i.e.,
                // corruption/update).
                if !blob_path.exists() {
                    download(&blob.to_string_lossy(), &blob_path).await?;
                }
            }
            archive.append_dir_all(&destination_path, &blobs_path)?;
        }
        Ok(())
    }

    async fn create_zone_package(&self, output_directory: &Path) -> Result<File> {
        // Create a tarball which will become an Omicron-brand image
        // archive.
        let tarfile = self.get_output_path(output_directory);
        let file = open_tarfile(&tarfile)?;
        let gzw = flate2::write::GzEncoder::new(file, flate2::Compression::fast());
        let mut archive = Builder::new(gzw);
        archive.mode(tar::HeaderMode::Deterministic);

        // The first file in the archive must always be a JSON file
        // which identifies the format of the rest of the archive.
        //
        // See the OMICRON1(5) man page for more detail.
        let mut root_json = tokio::fs::File::from_std(tempfile::tempfile()?);
        let contents = r#"{"v":"1","t":"layer"}"#;
        root_json.write_all(contents.as_bytes()).await?;
        root_json.seek(std::io::SeekFrom::Start(0)).await?;
        archive.append_file("oxide.json", &mut root_json.into_std().await)?;

        // Add mapped paths.
        for path in &self.paths {
            add_directory_and_parents(&mut archive, path.to.parent().unwrap())?;
            let dst = archive_path(&path.to)?;
            archive.append_dir_all(dst, &path.from)?;
        }

        // Attempt to add the Rust binaries, if any were built.
        if let Some(rust_pkg) = &self.rust {
            let dst = Path::new("/opt/oxide").join(&self.service_name).join("bin");
            add_directory_and_parents(&mut archive, &dst)?;
            let dst = archive_path(&dst)?;
            rust_pkg.add_binaries_to_archive(&mut archive, &dst)?;
        }

        // Add (and possibly download) blobs
        let blob_dst = Path::new("/opt/oxide").join(&self.service_name).join(BLOB);
        self.add_blobs(&mut archive, output_directory, &archive_path(&blob_dst)?)
            .await?;

        let file = archive
            .into_inner()
            .map_err(|err| anyhow!("Failed to finalize archive: {}", err))?;

        Ok(file.finish()?)
    }

    async fn create_tarball_package(&self, output_directory: &Path) -> Result<File> {
        // Create a tarball containing the necessary executable and auxiliary
        // files.
        let tarfile = self.get_output_path(output_directory);
        let file = open_tarfile(&tarfile)?;
        // TODO: We could add compression here, if we'd like?
        let mut archive = Builder::new(file);
        archive.mode(tar::HeaderMode::Deterministic);

        // Add mapped paths.
        for path in &self.paths {
            archive.append_dir_all(&path.to, &path.from)?;
        }

        // Attempt to add the Rust binaries, if any were built.
        if let Some(rust_pkg) = &self.rust {
            rust_pkg.add_binaries_to_archive(&mut archive, Path::new(""))?;
        }

        // Add (and possibly download) blobs
        self.add_blobs(&mut archive, output_directory, Path::new(BLOB))
            .await?;

        let file = archive
            .into_inner()
            .map_err(|err| anyhow!("Failed to finalize archive: {}", err))?;

        Ok(file)
    }
}

/// Describes configuration for a package which contains a Rust binary.
#[derive(Deserialize, Debug)]
pub struct RustPackage {
    /// The names of the compiled binaries to be used.
    // TODO: Could be extrapolated to "produced build artifacts"; we don't
    // really care about the individual binary files.
    pub binary_names: Vec<String>,

    /// True if the package has been built in release mode.
    pub release: bool,
}

impl RustPackage {
    // Adds the package's Rust binaries to the archive.
    //
    // - `archive`: The archive to which the binary should be added
    // - `dst_directory`: The path where the binary should be added in the archive
    fn add_binaries_to_archive<W: std::io::Write>(
        &self,
        archive: &mut tar::Builder<W>,
        dst_directory: &Path,
    ) -> Result<()> {
        for name in &self.binary_names {
            archive
                .append_path_with_name(
                    Self::local_binary_path(&name, self.release),
                    dst_directory.join(&name),
                )
                .map_err(|err| anyhow!("Cannot append binary to tarfile: {}", err))?;
        }
        Ok(())
    }

    // Returns the path to the compiled binary.
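    // For example, `local_binary_path("nexus", true)` would return
    // "target/release/nexus" (the binary name here is hypothetical).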
    fn local_binary_path(name: &str, release: bool) -> PathBuf {
        format!(
            "target/{}/{}",
            if release { "release" } else { "debug" },
            name,
        )
        .into()
    }
}

/// A pair of paths, mapping from a directory on the host to the target.
#[derive(Deserialize, Debug)]
pub struct MappedPath {
    /// Source path.
    pub from: PathBuf,
    /// Destination path.
    pub to: PathBuf,
}
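
// A minimal sketch of how these manifest types could be exercised, assuming the
// manifest format is TOML and the `toml` crate is available as a dev-dependency;
// the service, binary, and path names below are hypothetical.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn deserialize_example_package() {
        let manifest = r#"
            service_name = "nexus"
            zone = true

            [rust]
            binary_names = ["nexus"]
            release = true

            [[paths]]
            from = "smf/nexus"
            to = "/var/svc/manifest/site/nexus"
        "#;
        let package: Package =
            toml::from_str(manifest).expect("example manifest should deserialize");
        assert_eq!(package.service_name, "nexus");
        assert!(package.zone);
        assert!(package.blobs.is_none());
        assert!(package.rust.is_some());
        assert_eq!(package.paths.len(), 1);
    }
}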