#![forbid(unsafe_code)]
#![deny(missing_docs)]
#![deny(unused_must_use)]
#![deny(unused_mut)]
use async_std::{
fs::{File, OpenOptions},
io::prelude::SeekExt,
io::{ReadExt, SeekFrom, WriteExt},
};
use std::{
    collections::{HashMap, HashSet},
    path::PathBuf,
    str::from_utf8,
};
use anyhow::{bail, Context, Error, Result};
use sha2::{Digest, Sha256};
/// Controls how `open_file` behaves when the target file does not exist on
/// disk.
pub enum OpenSettings {
/// Create a new, empty atomic file if one does not already exist.
CreateIfNotExists,
/// Return an error if the file does not already exist.
ErrorIfNotExists,
}
/// The signature of a function that transforms a file's contents from
/// `initial_version` to `upgraded_version`, returning the upgraded data.
pub type UpgradeFunc =
fn(data: Vec<u8>, initial_version: u8, upgraded_version: u8) -> Result<Vec<u8>, Error>;
/// Describes one step of an upgrade path: how to move a file's contents
/// from `initial_version` to `updated_version`.
pub struct Upgrade {
/// The version the file must be at before this upgrade runs.
pub initial_version: u8,
/// The version the file will be at after this upgrade runs.
pub updated_version: u8,
/// The function that transforms the file contents between the versions.
pub process: UpgradeFunc,
}
/// A file whose updates are atomic: every write goes to a fully synced
/// backup copy before the main file is overwritten, so a crash at any point
/// leaves at least one intact copy on disk.
#[derive(Debug)]
pub struct AtomicFile {
// Handle to the on-disk backup copy ("<name>.atomic_file_backup").
backup_file: File,
// Handle to the main on-disk file ("<name>.atomic_file").
file: File,
// ASCII identifier (at most 200 bytes) stored in the metadata header.
identifier: String,
// The user-visible contents, excluding the 4096-byte metadata header.
logical_data: Vec<u8>,
// Current version of the contents; 0 is rejected everywhere.
version: u8,
}
// Appends `extension` to `path` without discarding any extension the path
// already has: "foo.dat" + "bak" becomes "foo.dat.bak", not "foo.bak".
fn add_extension(path: &mut PathBuf, extension: &str) {
    if let Some(current) = path.extension() {
        // Keep the existing extension and chain the new one after a dot.
        let mut chained = current.to_os_string();
        chained.push(".");
        chained.push(extension);
        path.set_extension(chained);
    } else {
        // No existing extension; a plain set_extension is enough.
        path.set_extension(extension);
    }
}
/// Converts a version number into its fixed four-byte on-disk form: three
/// zero-padded ASCII digits followed by a newline (e.g. 7 -> b"007\n").
///
/// # Panics
/// Panics if `version` is 0, which is reserved as invalid.
fn version_to_bytes(version: u8) -> [u8; 4] {
    // Version 0 is treated as invalid throughout this crate.
    if version == 0 {
        panic!("version is not allowed to be 0");
    }
    // A u8 never exceeds three digits, so `{:03}` always produces exactly
    // three characters, making the result exactly four bytes with the
    // trailing newline. This replaces the previous manual padding ladder.
    let mut version_arr = [0u8; 4];
    version_arr.copy_from_slice(format!("{:03}\n", version).as_bytes());
    version_arr
}
/// Parses the identifier and version out of a 4096-byte metadata header,
/// validating the structural markers along the way.
///
/// Layout (mirrors `fill_metadata`): bytes 13..16 hold a zero-padded
/// decimal version, the identifier starts at byte 17 and is terminated by a
/// newline, and the 27-byte atomic-file marker string follows immediately.
fn identifier_and_version_from_metadata(metadata: &[u8]) -> Result<(String, u8), Error> {
if metadata.len() < 4096 {
bail!("provided metadata is not the right size");
}
// The version is three ASCII digits at bytes 13..16.
let version_str =
from_utf8(&metadata[13..16]).context("the on-disk version could not be parsed")?;
let version: u8 = version_str
.parse()
.context("unable to parse version of metadata")?;
// Scan at most 201 bytes from offset 17 for the newline that terminates
// the identifier (identifiers are capped at 200 bytes, plus the newline).
let mut clean_identifier = false;
let mut identifier = "".to_string();
let mut atomic_identifier_offset = 0;
for i in 0..201 {
if metadata[i + 17] == '\n' as u8 {
clean_identifier = true;
// The atomic-file marker begins right after the newline.
atomic_identifier_offset = i + 18;
break;
}
// Bytes above 127 are not ASCII; the identifier must be pure ASCII.
if metadata[i + 17] > 127 {
bail!("identifier contains non-ascii characters before termination sequence");
}
identifier.push(metadata[i + 17] as char);
}
if !clean_identifier {
bail!("provided metadata does not have a legally terminating identifier");
}
// Confirm the 27-byte marker string follows the identifier. The maximum
// offset here is 218+27, comfortably inside the 4096 bytes checked above.
let atomic_identifier = "DavidVorick/atomic_file-v1\n".as_bytes();
if metadata[atomic_identifier_offset..atomic_identifier_offset+27] != atomic_identifier[..] {
bail!("file does not appear to be an atomic file");
}
Ok((identifier, version))
}
impl AtomicFile {
    /// Writes the metadata header into the first 4096 bytes of `buf`.
    ///
    /// Layout: bytes 0..12 hold a hex checksum of bytes 12.., byte 12 is a
    /// newline, bytes 13..17 are the zero-padded version plus newline, then
    /// the identifier, a newline, the atomic-file marker string, and a
    /// final newline at byte 4095.
    ///
    /// # Panics
    /// Panics if `buf` is smaller than 4096 bytes or the identifier exceeds
    /// 200 bytes; both indicate caller misuse.
    fn fill_metadata(&self, buf: &mut [u8]) {
        if buf.len() < 4096 {
            panic!("misuse of fill_metadata, check stack trace");
        }
        if self.identifier.len() > 200 {
            panic!(
                "file has too-large identifier, ensure bounds checking is in place in open_file"
            );
        }
        let version_bytes = version_to_bytes(self.version);
        buf[12] = '\n' as u8;
        buf[13..17].copy_from_slice(&version_bytes);
        let iden_bytes = self.identifier.as_bytes();
        buf[17..17 + iden_bytes.len()].copy_from_slice(iden_bytes);
        buf[17 + iden_bytes.len()] = '\n' as u8;
        // The atomic-file marker begins immediately after the identifier's
        // terminating newline. (A previous dead store of 255 at this offset
        // was removed: the copy_from_slice below overwrote it immediately.)
        let atomic_identifier = "DavidVorick/atomic_file-v1\n".as_bytes();
        buf[18 + iden_bytes.len()..18 + iden_bytes.len() + 27].copy_from_slice(atomic_identifier);
        buf[4095] = '\n' as u8;
        // The checksum covers everything after the checksum field itself,
        // including the logical data that follows the metadata header.
        let mut hasher = Sha256::new();
        hasher.update(&buf[12..]);
        let result = hasher.finalize();
        let result_hex = hex::encode(result);
        buf[..12].copy_from_slice(result_hex[..12].as_bytes());
    }

    /// Returns a copy of the logical (user-visible) contents of the file,
    /// excluding the metadata header.
    pub fn contents(&self) -> Vec<u8> {
        self.logical_data.clone()
    }

    /// Atomically replaces the contents of the file with `buf`.
    ///
    /// The full image (metadata + data) is written and fsync'd to the
    /// backup file before the main file is touched, so a crash at any point
    /// leaves at least one intact copy on disk.
    pub async fn write_file(&mut self, buf: &[u8]) -> Result<(), Error> {
        // Build the full on-disk image: a 4096-byte header plus the data.
        let mut full_data = vec![0u8; 4096 + buf.len()];
        full_data[4096..].copy_from_slice(buf);
        self.fill_metadata(&mut full_data);
        // Write and durably sync the backup file first.
        self.backup_file
            .set_len(full_data.len() as u64)
            .await
            .context("unable to set the length of the backup file")?;
        self.backup_file
            .seek(SeekFrom::Start(0))
            .await
            .context("unable to seek to start of backup file")?;
        self.backup_file
            .write_all(&full_data)
            .await
            .context("unable to write to backup file")?;
        self.backup_file
            .flush()
            .await
            .context("unable to flush backup file")?;
        self.backup_file
            .sync_all()
            .await
            .context("fsync of backup file failed")?;
        // Only after the backup is safely on disk do we overwrite the main
        // file. (These error contexts previously said "backup file" — a
        // copy-paste mistake now corrected to name the main file.)
        self.file
            .set_len(full_data.len() as u64)
            .await
            .context("unable to set the length of the main file")?;
        self.file
            .seek(SeekFrom::Start(0))
            .await
            .context("unable to seek to start of main file")?;
        self.file
            .write_all(&full_data)
            .await
            .context("unable to write to main file")?;
        self.file
            .flush()
            .await
            .context("unable to flush main file")?;
        self.file
            .sync_all()
            .await
            .context("fsync of main file failed")?;
        self.logical_data = buf.to_vec();
        Ok(())
    }
}
fn verify_upgrade_paths(
upgrade_paths: &Vec<Upgrade>,
current_version: u8,
latest_version: u8,
) -> Result<(), Error> {
if latest_version == 0 {
bail!("version 0 is not allowed for a VersionedFile");
}
let mut version_routes = HashMap::new();
for path in upgrade_paths {
if path.initial_version >= path.updated_version {
bail!("upgrade paths must always lead to a higher version number");
}
if version_routes.contains_key(&path.initial_version) {
bail!("upgrade paths can only have one upgrade for each version");
}
if path.updated_version > latest_version {
bail!("upgrade paths lead beyond the latest version");
}
if path.initial_version == 0 {
bail!("version 0 is not allowed for a VersionedFile");
}
version_routes.insert(path.initial_version, path.updated_version);
}
let mut complete_paths = HashMap::new();
complete_paths.insert(latest_version, {});
loop {
let mut progress = false;
let mut finished = true;
for (key, value) in &version_routes {
if complete_paths.contains_key(key) {
continue;
}
if complete_paths.contains_key(value) {
progress = true;
complete_paths.insert(*key, {});
} else {
finished = false;
}
}
if finished {
break;
}
if progress == false {
bail!("update graph is incomplete, not all nodes lead to the latest version");
}
}
if !complete_paths.contains_key(¤t_version) {
bail!("no upgrade found for current version of file");
}
Ok(())
}
// Applies a single upgrade step to `file`. The transformation runs purely
// in memory: both the logical data and the version number are updated on
// the struct, and nothing is written to disk here.
async fn perform_file_upgrade(file: &mut AtomicFile, u: &Upgrade) -> Result<(), Error> {
    // Guard against applying an upgrade meant for a different version.
    if file.version != u.initial_version {
        bail!("wrong update has been selected for this file");
    }
    // Run the caller-provided transformation on a copy of the contents.
    let upgraded = (u.process)(file.logical_data.clone(), u.initial_version, u.updated_version)
        .context(format!(
            "unable to complete file upgrade from version {} to {}",
            u.initial_version, u.updated_version
        ))?;
    // Commit the result in memory.
    file.logical_data = upgraded;
    file.version = u.updated_version;
    Ok(())
}
// Repeatedly applies upgrades from `upgrades` until `file` reaches
// `latest_version`. Assumes `verify_upgrade_paths` has already confirmed a
// complete route exists; panics if no applicable upgrade is found.
async fn perform_file_upgrades(
file: &mut AtomicFile,
latest_version: u8,
upgrades: &Vec<Upgrade>,
) -> Result<(), Error> {
while file.version != latest_version {
let mut found = false;
for upgrade in upgrades {
if upgrade.initial_version == file.version {
perform_file_upgrade(file, upgrade)
.await
.context("unable to complete file upgrade")?;
// NOTE(review): perform_file_upgrade already advanced file.version to
// updated_version, so this assignment is a harmless no-op.
file.version = upgrade.updated_version;
found = true;
break;
}
}
if !found {
// Unreachable when upgrade paths were verified before calling; a
// missing route here is a programmer error, hence the panic.
panic!("attempting to perform file upgrades without a viable upgrade path");
}
}
Ok(())
}
/// Removes both on-disk copies (main and backup) of the atomic file at
/// `filepath`.
pub async fn delete_file(filepath: &PathBuf) -> Result<(), Error> {
    // Delete the main copy first. (The error contexts here were previously
    // swapped/garbled: removing the main file reported "unable to backup
    // file" and removing the backup reported "unable to remove main file".)
    let mut main_path = filepath.clone();
    add_extension(&mut main_path, "atomic_file");
    async_std::fs::remove_file(&main_path)
        .await
        .context("unable to remove main file")?;
    // Then delete the backup copy.
    let mut backup_path = filepath.clone();
    add_extension(&mut backup_path, "atomic_file_backup");
    async_std::fs::remove_file(&backup_path)
        .await
        .context("unable to remove backup file")?;
    Ok(())
}
/// Reports whether an atomic file exists at `filepath`, by checking for the
/// main on-disk copy ("<name>.atomic_file").
pub fn exists(filepath: &PathBuf) -> bool {
    let mut main_path = filepath.clone();
    add_extension(&mut main_path, "atomic_file");
    main_path.exists()
}
/// Opens an existing atomic file, erroring if it does not exist. Equivalent
/// to calling `open_file` with version 1, no upgrades, and
/// `ErrorIfNotExists`.
pub async fn open(filepath: &PathBuf, expected_identifier: &str) -> Result<AtomicFile, Error> {
    let no_upgrades = Vec::new();
    open_file(
        filepath,
        expected_identifier,
        1,
        &no_upgrades,
        OpenSettings::ErrorIfNotExists,
    )
    .await
}
/// Opens (and, if allowed, creates) an atomic file at `filepath`.
///
/// The sequence: validate inputs, open both the main file and its backup,
/// try the main copy (accepting either a matching checksum or an all-'f'
/// checksum override), fall back to the backup copy (repairing the main
/// file from it), create a brand new file if the main file is empty, and
/// otherwise report unrecoverable corruption. Any required version upgrades
/// run before the file is returned.
pub async fn open_file(
filepath: &PathBuf,
expected_identifier: &str,
latest_version: u8,
upgrades: &Vec<Upgrade>,
open_settings: OpenSettings,
) -> Result<AtomicFile, Error> {
// Input validation: ASCII-only path and identifier, identifier within the
// 200-byte metadata budget, nonzero version, and no newlines (a newline
// terminates the identifier in the on-disk header).
let path_str = filepath.to_str().context("could not stringify path")?;
if !path_str.is_ascii() {
bail!("path should be valid ascii");
}
if expected_identifier.len() > 200 {
bail!("the identifier of an atomic file cannot exceed 200 bytes");
}
if !expected_identifier.is_ascii() {
bail!("the identifier must be ascii");
}
if latest_version == 0 {
bail!("version is not allowed to be zero");
}
for c in expected_identifier.chars() {
if c == '\n' {
bail!("identifier is not allowed to contain newlines");
}
}
let create_if_not_exists = match open_settings {
OpenSettings::CreateIfNotExists => true,
OpenSettings::ErrorIfNotExists => false,
};
// Derive the on-disk names: "<path>.atomic_file" for the main copy and
// "<path>.atomic_file_backup" for the recovery copy.
let mut filepath = filepath.clone();
let mut backup_filepath = filepath.clone();
add_extension(&mut filepath, "atomic_file");
add_extension(&mut backup_filepath, "atomic_file_backup");
let filepath_exists = filepath.exists();
if !create_if_not_exists && !filepath_exists {
bail!("file does not exist");
}
// Both handles are opened with create(true); a missing file was already
// rejected above when creation is not allowed.
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(filepath)
.await
.context("unable to open versioned file")?;
let mut backup_file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(backup_filepath)
.await
.context("unable to open versioned file")?;
// First attempt: the main file. Anything shorter than the 4096-byte
// metadata header cannot be valid.
let file_md = file
.metadata()
.await
.context("unable to get file metadata")?;
let file_len = file_md.len();
if file_len >= 4096 {
let mut buf = vec![0u8; file_len as usize];
file.read_exact(&mut buf)
.await
.context("unable to read file")?;
// Recompute the checksum over everything after the 12-byte checksum
// field and compare against the stored value. A main file whose stored
// checksum is twelve 'f' characters is treated as an explicit override
// and accepted without a matching hash.
let mut hasher = Sha256::new();
hasher.update(&buf[12..]);
let result = hasher.finalize();
let result_hex = hex::encode(result);
let result_hex_bytes = result_hex.as_bytes();
let override_value = [255u8; 6];
let override_hex = hex::encode(override_value);
let override_hex_bytes = override_hex.as_bytes();
if result_hex_bytes[..12] == buf[..12] || buf[..12] == override_hex_bytes[..] {
let (identifier, version) = identifier_and_version_from_metadata(&buf[..4096])
.context("unable to parse version and identifier from file metadata")?;
if identifier != expected_identifier {
bail!("file has the wrong identifier");
}
// Make sure latest_version is reachable before committing to this
// file, then apply the upgrades in memory.
verify_upgrade_paths(&upgrades, version, latest_version)
.context("upgrade paths are invalid")?;
let mut atomic_file = AtomicFile {
backup_file,
file,
identifier,
logical_data: buf[4096..].to_vec(),
version,
};
perform_file_upgrades(&mut atomic_file, latest_version, upgrades)
.await
.context("unable to upgrade file")?;
return Ok(atomic_file);
}
}
// The main file was too short or failed its checksum; try to recover from
// the backup copy instead.
let backup_file_md = backup_file
.metadata()
.await
.context("unable to get backup_file metadata")?;
let backup_file_len = backup_file_md.len();
if backup_file_len >= 4096 {
let mut buf = vec![0u8; backup_file_len as usize];
backup_file
.read_exact(&mut buf)
.await
.context("unable to read backup_file")?;
// The backup must have a genuinely matching checksum; the all-'f'
// override is only honored on the main file.
let mut hasher = Sha256::new();
hasher.update(&buf[12..]);
let result = hasher.finalize();
let result_hex = hex::encode(result);
let result_hex_bytes = result_hex.as_bytes();
if result_hex_bytes[..12] == buf[..12] {
let (identifier, version) = identifier_and_version_from_metadata(&buf[..4096])
.context("unable to parse version and identifier from file metadata")?;
if identifier != expected_identifier {
bail!("file has the wrong identifier");
}
verify_upgrade_paths(&upgrades, version, latest_version)
.context("upgrade paths are invalid")?;
let mut atomic_file = AtomicFile {
backup_file,
file,
identifier,
logical_data: buf[4096..].to_vec(),
version,
};
perform_file_upgrades(&mut atomic_file, latest_version, upgrades)
.await
.context("unable to upgrade file")?;
// Repair the main file by rewriting it from the backup's on-disk
// image (as read, before any in-memory upgrades) and syncing it.
atomic_file
.file
.set_len(buf.len() as u64)
.await
.context("unable to set length of atomic file")?;
atomic_file
.file
.seek(SeekFrom::Start(0))
.await
.context("unable to seek in atomic file")?;
atomic_file
.file
.write_all(&buf)
.await
.context("unable to write backup data to atomic file")?;
atomic_file
.file
.sync_all()
.await
.context("unable to sync backup data to atomic file")?;
return Ok(atomic_file);
}
}
// Neither copy was usable. A zero-length main file means this is a fresh
// create; write out an empty file at the latest version.
if file_len == 0 {
let mut af = AtomicFile {
backup_file,
file,
identifier: expected_identifier.to_string(),
logical_data: Vec::new(),
version: latest_version,
};
af.write_file(&Vec::new()).await.context("unable to create new file")?;
return Ok(af);
}
bail!("there appears to have been unrecoverable file corruption");
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::{Seek, Write};
use testdir::testdir;
use OpenSettings::{
CreateIfNotExists,
ErrorIfNotExists,
};
fn stub_upgrade(v: Vec<u8>, _: u8, _: u8) -> Result<Vec<u8>, Error> {
Ok(v)
}
// Test upgrade for 1 -> 2: expects b"test_data" and produces b"test".
fn smoke_upgrade_1_2(
    data: Vec<u8>,
    initial_version: u8,
    updated_version: u8,
) -> Result<Vec<u8>, Error> {
    if initial_version != 1 || updated_version != 2 {
        bail!("this upgrade is intended to take the file from version 1 to version 2");
    }
    if data.len() != 9 {
        bail!("file is wrong len");
    }
    if data != b"test_data" {
        // bail! formats its arguments directly; the previous
        // bail!(format!(...)) allocated an intermediate String for nothing.
        bail!("file appears corrupt: {:?}", data);
    }
    Ok(b"test".to_vec())
}
// Test upgrade for 2 -> 3: expects b"test" and produces b"testtest".
fn smoke_upgrade_2_3(
    data: Vec<u8>,
    initial_version: u8,
    updated_version: u8,
) -> Result<Vec<u8>, Error> {
    // This upgrade is only valid for the 2 -> 3 transition.
    let versions_match = initial_version == 2 && updated_version == 3;
    if !versions_match {
        bail!("this upgrade is intended to take the file from version 2 to version 3");
    }
    // The version-2 payload is exactly four bytes.
    if data.len() != 4 {
        bail!("file is wrong len");
    }
    if data != b"test" {
        bail!("file appears corrupt");
    }
    // Produce the version-3 payload.
    Ok(b"testtest".to_vec())
}
// Test upgrade for 3 -> 4: expects b"testtest" and produces b"testtesttest".
fn smoke_upgrade_3_4(
    data: Vec<u8>,
    initial_version: u8,
    updated_version: u8,
) -> Result<Vec<u8>, Error> {
    if initial_version != 3 || updated_version != 4 {
        // This message previously said "version 1 to version 2" — a
        // copy-paste mistake; this upgrade covers 3 -> 4.
        bail!("this upgrade is intended to take the file from version 3 to version 4");
    }
    if data.len() != 8 {
        bail!("file is wrong len");
    }
    if data != b"testtest" {
        bail!("file appears corrupt");
    }
    Ok(b"testtesttest".to_vec())
}
// End-to-end smoke test covering creation, identifier validation, writes
// and reads, upgrade chains, corruption recovery from the backup, the
// all-'f' checksum override, and deletion.
async fn smoke_test() {
let dir = testdir!();
let test_dat = dir.join("test.dat");
// Version 0 is rejected outright.
open_file(&test_dat, "versioned_file::test.dat", 0, &Vec::new(), CreateIfNotExists)
.await
.context("unable to create versioned file")
.unwrap_err();
// Create the file, then confirm reopening succeeds.
open_file(&test_dat, "versioned_file::test.dat", 1, &Vec::new(), CreateIfNotExists)
.await
.context("unable to create versioned file")
.unwrap();
open_file(&test_dat, "versioned_file::test.dat", 1, &Vec::new(), CreateIfNotExists)
.await
.context("unable to create versioned file")
.unwrap();
// Opening with a mismatched identifier must fail.
open_file(&test_dat, "bad_versioned_file::test.dat", 1, &Vec::new(), CreateIfNotExists)
.await
.context("unable to create versioned file")
.unwrap_err();
// Non-ASCII paths and identifiers are rejected.
let invalid_name = dir.join("❄️"); open_file(&invalid_name, "versioned_file::test.dat", 1, &Vec::new(), CreateIfNotExists)
.await
.context("unable to create versioned file")
.unwrap_err();
let invalid_id = dir.join("invalid_identifier.dat");
open_file(&invalid_id, "versioned_file::test.dat::❄️", 1, &Vec::new(), CreateIfNotExists)
.await
.context("unable to create versioned file")
.unwrap_err();
// Write data and confirm it round-trips through a fresh open.
let mut file = open_file(&test_dat, "versioned_file::test.dat", 1, &Vec::new(), CreateIfNotExists)
.await
.unwrap();
file.write_file(b"test_data").await.unwrap();
let file = open_file(&test_dat, "versioned_file::test.dat", 1, &Vec::new(), CreateIfNotExists)
.await
.unwrap();
if file.contents().len() != 9 {
panic!("file has unexpected len");
}
if &file.contents() != b"test_data" {
panic!("data read does not match data written");
}
open_file(&test_dat, "versioned_file::test.dat", 1, &Vec::new(), CreateIfNotExists)
.await
.unwrap();
// Apply a single-step upgrade (1 -> 2) and verify the transformed data.
let mut upgrade_chain = vec![Upgrade {
initial_version: 1,
updated_version: 2,
process: smoke_upgrade_1_2,
}];
let file = open_file(&test_dat, "versioned_file::test.dat", 2, &upgrade_chain, CreateIfNotExists)
.await
.unwrap();
if file.contents().len() != 4 {
panic!("file has wrong len");
}
if &file.contents() != b"test" {
panic!("data read does not match data written");
}
open_file(&test_dat, "versioned_file::test.dat", 2, &upgrade_chain, CreateIfNotExists)
.await
.unwrap();
// Extend the chain (2 -> 3 -> 4) and verify the multi-hop upgrade runs.
upgrade_chain.push(Upgrade {
initial_version: 2,
updated_version: 3,
process: smoke_upgrade_2_3,
});
upgrade_chain.push(Upgrade {
initial_version: 3,
updated_version: 4,
process: smoke_upgrade_3_4,
});
let file = open_file(&test_dat, "versioned_file::test.dat", 4, &upgrade_chain, CreateIfNotExists)
.await
.unwrap();
if file.contents().len() != 12 {
panic!("file has wrong len");
}
if &file.contents() != b"testtesttest" {
panic!("data read does not match data written");
}
drop(file);
open_file(&test_dat, "versioned_file::test.dat", 4, &upgrade_chain, CreateIfNotExists)
.await
.unwrap();
// Corrupt the main file on disk (too short to hold metadata) and confirm
// open repairs it from the backup, restoring the original bytes.
let mut test_main = test_dat.clone();
add_extension(&mut test_main, "atomic_file");
let original_data = std::fs::read(&test_main).unwrap();
std::fs::write(&test_main, b"file corruption!").unwrap();
open_file(&test_dat, "versioned_file::test.dat", 4, &upgrade_chain, CreateIfNotExists)
.await
.unwrap();
let repaired_data = std::fs::read(&test_main).unwrap();
assert!(repaired_data == original_data);
let mut raw_file_name = test_dat.clone();
add_extension(&mut raw_file_name, "atomic_file");
println!("{:?}", raw_file_name);
let mut raw_file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.open(raw_file_name)
.unwrap();
// Truncate the main file to metadata-only and scribble a partial
// checksum; that invalidates the main file, so the backup copy is used
// and the data survives.
raw_file.set_len(4096).unwrap();
raw_file.write("ffff".as_bytes()).unwrap();
let shorter_file = open_file(&test_dat, "versioned_file::test.dat", 4, &upgrade_chain, CreateIfNotExists)
.await
.unwrap();
assert!(shorter_file.contents().len() != 0);
// Writing a full twelve-'f' checksum engages the override marker, so the
// truncated (empty-contents) main file is accepted as-is.
raw_file.set_len(4096).unwrap();
raw_file.seek(std::io::SeekFrom::Start(0)).unwrap();
raw_file.write("ffffffffffff".as_bytes()).unwrap();
let shorter_file = open(&test_dat, "versioned_file::test.dat")
.await
.unwrap();
assert!(shorter_file.contents().len() == 0);
// Delete, recreate with a different identifier, then delete again and
// confirm the file is gone.
delete_file(&test_dat).await.unwrap();
open_file(&test_dat, "versioned_file::test.dat::after_delete", 1, &Vec::new(), CreateIfNotExists)
.await
.unwrap();
assert!(exists(&test_dat));
open(&test_dat, "versioned_file::test.dat::after_delete").await.unwrap();
delete_file(&test_dat).await.unwrap();
open(&test_dat, "versioned_file::test.dat::after_delete").await.unwrap_err();
assert!(!exists(&test_dat));
// ErrorIfNotExists must refuse to create a missing file.
open_file(&test_dat, "versioned_file::test.dat::after_delete", 1, &Vec::new(), ErrorIfNotExists)
.await
.unwrap_err();
open_file(&test_dat, "versioned_file::test.dat::after_delete", 1, &Vec::new(), ErrorIfNotExists)
.await
.unwrap_err();
let mut f = open_file(&test_dat, "versioned_file::test.dat::after_delete", 1, &Vec::new(), CreateIfNotExists)
.await
.unwrap();
f.write_file("this is where the real file data is stored!".as_bytes()).await.unwrap();
}
// Runs the full smoke test on the async-std executor.
#[async_std::test]
async fn smoke_test_async_std() {
smoke_test().await;
}
// Runs the same smoke test on the tokio executor, confirming the crate is
// not tied to a single runtime.
#[tokio::test]
async fn smoke_test_tokio() {
smoke_test().await;
}
#[test]
fn test_verify_upgrade_paths() {
verify_upgrade_paths(&Vec::new(), 0, 0).unwrap_err(); verify_upgrade_paths(&Vec::new(), 0, 1).unwrap_err(); verify_upgrade_paths(&Vec::new(), 1, 1).unwrap();
verify_upgrade_paths(&Vec::new(), 2, 2).unwrap();
verify_upgrade_paths(&Vec::new(), 255, 255).unwrap();
verify_upgrade_paths(
&vec![Upgrade {
initial_version: 1,
updated_version: 2,
process: stub_upgrade,
}],
1,
2,
)
.unwrap();
verify_upgrade_paths(
&vec![Upgrade {
initial_version: 2,
updated_version: 2,
process: stub_upgrade,
}],
2,
2,
)
.unwrap_err();
verify_upgrade_paths(
&vec![Upgrade {
initial_version: 1,
updated_version: 2,
process: stub_upgrade,
}],
1,
3,
)
.unwrap_err();
verify_upgrade_paths(
&vec![
Upgrade {
initial_version: 1,
updated_version: 2,
process: stub_upgrade,
},
Upgrade {
initial_version: 2,
updated_version: 3,
process: stub_upgrade,
},
],
1,
3,
)
.unwrap();
verify_upgrade_paths(
&vec![
Upgrade {
initial_version: 1,
updated_version: 2,
process: stub_upgrade,
},
Upgrade {
initial_version: 2,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 1,
updated_version: 3,
process: stub_upgrade,
},
],
1,
3,
)
.unwrap_err();
verify_upgrade_paths(
&vec![
Upgrade {
initial_version: 1,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 2,
updated_version: 3,
process: stub_upgrade,
},
],
1,
3,
)
.unwrap();
verify_upgrade_paths(
&vec![
Upgrade {
initial_version: 1,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 2,
updated_version: 3,
process: stub_upgrade,
},
],
1,
2,
)
.unwrap_err();
verify_upgrade_paths(
&vec![
Upgrade {
initial_version: 1,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 2,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 3,
updated_version: 6,
process: stub_upgrade,
},
Upgrade {
initial_version: 4,
updated_version: 6,
process: stub_upgrade,
},
Upgrade {
initial_version: 5,
updated_version: 6,
process: stub_upgrade,
},
],
1,
6,
)
.unwrap();
verify_upgrade_paths(
&vec![
Upgrade {
initial_version: 1,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 2,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 3,
updated_version: 6,
process: stub_upgrade,
},
Upgrade {
initial_version: 4,
updated_version: 6,
process: stub_upgrade,
},
],
1,
6,
)
.unwrap();
verify_upgrade_paths(
&vec![
Upgrade {
initial_version: 1,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 2,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 3,
updated_version: 6,
process: stub_upgrade,
},
Upgrade {
initial_version: 4,
updated_version: 6,
process: stub_upgrade,
},
],
5,
6,
)
.unwrap_err();
verify_upgrade_paths(
&vec![
Upgrade {
initial_version: 5,
updated_version: 6,
process: stub_upgrade,
},
Upgrade {
initial_version: 2,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 3,
updated_version: 6,
process: stub_upgrade,
},
Upgrade {
initial_version: 1,
updated_version: 3,
process: stub_upgrade,
},
Upgrade {
initial_version: 4,
updated_version: 6,
process: stub_upgrade,
},
],
1,
6,
)
.unwrap();
verify_upgrade_paths(
&vec![
Upgrade {
initial_version: 2,
updated_version: 5,
process: stub_upgrade,
},
Upgrade {
initial_version: 6,
updated_version: 7,
process: stub_upgrade,
},
Upgrade {
initial_version: 3,
updated_version: 6,
process: stub_upgrade,
},
Upgrade {
initial_version: 1,
updated_version: 4,
process: stub_upgrade,
},
Upgrade {
initial_version: 4,
updated_version: 6,
process: stub_upgrade,
},
],
1,
6,
)
.unwrap_err();
}
#[test]
fn test_version_to_bytes() {
    // Each version must serialize as three zero-padded ASCII digits
    // followed by a newline.
    let cases: [(u8, &[u8; 4]); 6] = [
        (1, b"001\n"),
        (2, b"002\n"),
        (9, b"009\n"),
        (10, b"010\n"),
        (39, b"039\n"),
        (139, b"139\n"),
    ];
    for &(version, expected) in cases.iter() {
        assert_eq!(&version_to_bytes(version), expected);
    }
}
}