use alloc::collections::BTreeMap;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
/// HTTP request methods understood by the gateway.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum HttpMethod {
    Get,
    Put,
    Post,
    Delete,
    Head,
    Options,
}

impl HttpMethod {
    /// Parses a method name, ignoring ASCII case (e.g. "get", "GET").
    ///
    /// Returns `None` for methods the gateway does not support
    /// (e.g. PATCH, TRACE, CONNECT).
    pub fn from_str(s: &str) -> Option<Self> {
        // Compare case-insensitively without allocating: the previous
        // implementation built a fresh String via `s.to_uppercase()` on
        // every call. Driving the comparison off `as_str()` also keeps a
        // single source of truth for the wire names.
        [
            HttpMethod::Get,
            HttpMethod::Put,
            HttpMethod::Post,
            HttpMethod::Delete,
            HttpMethod::Head,
            HttpMethod::Options,
        ]
        .iter()
        .copied()
        .find(|m| s.eq_ignore_ascii_case(m.as_str()))
    }

    /// Returns the canonical upper-case wire name of the method.
    pub fn as_str(&self) -> &'static str {
        match self {
            HttpMethod::Get => "GET",
            HttpMethod::Put => "PUT",
            HttpMethod::Post => "POST",
            HttpMethod::Delete => "DELETE",
            HttpMethod::Head => "HEAD",
            HttpMethod::Options => "OPTIONS",
        }
    }
}
/// A parsed HTTP request: method, path, query string, headers, and body.
#[derive(Debug, Clone)]
pub struct HttpRequest {
    pub method: HttpMethod,
    /// URL path component (no query string).
    pub path: String,
    /// Decoded query parameters.
    pub query: BTreeMap<String, String>,
    /// Headers with their original casing preserved; lookups through
    /// `header()` are case-insensitive.
    pub headers: BTreeMap<String, String>,
    pub body: Vec<u8>,
}

impl HttpRequest {
    /// Creates an empty request with the given method and path.
    pub fn new(method: HttpMethod, path: String) -> Self {
        Self {
            method,
            path,
            query: BTreeMap::new(),
            headers: BTreeMap::new(),
            body: Vec::new(),
        }
    }

    /// Case-insensitive header lookup (HTTP header names are ASCII).
    ///
    /// Linear scan over the map; avoids the two `to_lowercase()` String
    /// allocations per compared key that the previous implementation paid.
    pub fn header(&self, name: &str) -> Option<&String> {
        self.headers
            .iter()
            .find(|(k, _)| k.eq_ignore_ascii_case(name))
            .map(|(_, v)| v)
    }

    /// Parsed `Content-Length` header, or 0 when absent or unparsable.
    pub fn content_length(&self) -> usize {
        self.header("content-length")
            .and_then(|s| s.parse().ok())
            .unwrap_or(0)
    }

    /// The `Content-Type` header value, if present.
    pub fn content_type(&self) -> Option<&String> {
        self.header("content-type")
    }
}
/// An HTTP response under construction: status, headers, and body.
#[derive(Debug, Clone)]
pub struct HttpResponse {
    pub status: u16,
    pub headers: BTreeMap<String, String>,
    pub body: Vec<u8>,
}

impl HttpResponse {
    /// Creates an empty response with the given status code.
    pub fn new(status: u16) -> Self {
        Self {
            status,
            headers: BTreeMap::new(),
            body: Vec::new(),
        }
    }

    /// 200 OK with no body.
    pub fn ok() -> Self {
        Self::new(200)
    }

    /// 204 No Content.
    pub fn no_content() -> Self {
        Self::new(204)
    }

    /// 404 Not Found with no body (use `error` for an XML error document).
    pub fn not_found() -> Self {
        Self::new(404)
    }

    /// Builds an S3-style XML error response for the given status,
    /// error code, and message.
    ///
    /// Delegates to `with_xml` instead of duplicating the body and
    /// Content-Type assignment it already performs.
    pub fn error(status: u16, code: &str, message: &str) -> Self {
        Self::new(status).with_xml(xml_error(code, message))
    }

    /// Builder: sets (or replaces) a header.
    pub fn with_header(mut self, name: impl Into<String>, value: impl Into<String>) -> Self {
        self.headers.insert(name.into(), value.into());
        self
    }

    /// Builder: replaces the body bytes.
    pub fn with_body(mut self, body: Vec<u8>) -> Self {
        self.body = body;
        self
    }

    /// Builder: sets an XML body and the matching Content-Type header.
    pub fn with_xml(mut self, xml: String) -> Self {
        self.body = xml.into_bytes();
        self.headers
            .insert("Content-Type".into(), "application/xml".into());
        self
    }
}
/// The S3 API operation a request resolves to after routing.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum S3Operation {
    ListBuckets,
    CreateBucket,
    DeleteBucket,
    HeadBucket,
    ListObjectsV2,
    GetBucketLocation,
    GetBucketVersioning,
    PutBucketVersioning,
    PutObject,
    GetObject,
    DeleteObject,
    HeadObject,
    CopyObject,
    DeleteObjects,
    CreateMultipartUpload,
    UploadPart,
    CompleteMultipartUpload,
    AbortMultipartUpload,
    ListParts,
    ListMultipartUploads,
    ListObjectVersions,
    /// Catch-all for operations the router did not recognize; carries the
    /// raw operation name as seen on the wire.
    Unknown(String),
}

impl S3Operation {
    /// Returns the operation's canonical name (e.g. for logging/metrics).
    ///
    /// Returns `&str` rather than `&'static str` because `Unknown`
    /// borrows its owned payload.
    pub fn name(&self) -> &str {
        match self {
            S3Operation::ListBuckets => "ListBuckets",
            S3Operation::CreateBucket => "CreateBucket",
            S3Operation::DeleteBucket => "DeleteBucket",
            S3Operation::HeadBucket => "HeadBucket",
            S3Operation::ListObjectsV2 => "ListObjectsV2",
            S3Operation::GetBucketLocation => "GetBucketLocation",
            S3Operation::GetBucketVersioning => "GetBucketVersioning",
            S3Operation::PutBucketVersioning => "PutBucketVersioning",
            S3Operation::PutObject => "PutObject",
            S3Operation::GetObject => "GetObject",
            S3Operation::DeleteObject => "DeleteObject",
            S3Operation::HeadObject => "HeadObject",
            S3Operation::CopyObject => "CopyObject",
            S3Operation::DeleteObjects => "DeleteObjects",
            S3Operation::CreateMultipartUpload => "CreateMultipartUpload",
            S3Operation::UploadPart => "UploadPart",
            S3Operation::CompleteMultipartUpload => "CompleteMultipartUpload",
            S3Operation::AbortMultipartUpload => "AbortMultipartUpload",
            S3Operation::ListParts => "ListParts",
            S3Operation::ListMultipartUploads => "ListMultipartUploads",
            S3Operation::ListObjectVersions => "ListObjectVersions",
            S3Operation::Unknown(s) => s,
        }
    }
}
/// An HTTP request after S3 routing: bucket/key extracted and the
/// target operation resolved.
#[derive(Debug, Clone)]
pub struct S3Request {
    pub method: HttpMethod,
    /// Target bucket, when the request path names one.
    pub bucket: Option<String>,
    /// Target object key, when the request path names one.
    pub key: Option<String>,
    pub operation: S3Operation,
    pub headers: BTreeMap<String, String>,
    pub query: BTreeMap<String, String>,
    pub body: Vec<u8>,
}

impl S3Request {
    /// Exact-name query parameter lookup (query keys are case-sensitive).
    pub fn query_param(&self, name: &str) -> Option<&String> {
        self.query.get(name)
    }

    /// Case-insensitive header lookup; mirrors `HttpRequest::header`.
    ///
    /// Uses `eq_ignore_ascii_case` (header names are ASCII) instead of the
    /// previous per-key `to_lowercase()` String allocations.
    pub fn header(&self, name: &str) -> Option<&String> {
        self.headers
            .iter()
            .find(|(k, _)| k.eq_ignore_ascii_case(name))
            .map(|(_, v)| v)
    }

    /// The `x-amz-copy-source` header (CopyObject source), if present.
    pub fn copy_source(&self) -> Option<&String> {
        self.header("x-amz-copy-source")
    }

    /// The `uploadId` query parameter used by multipart operations.
    pub fn upload_id(&self) -> Option<&String> {
        self.query.get("uploadId")
    }

    /// Parsed `partNumber` query parameter, if present and numeric.
    pub fn part_number(&self) -> Option<u32> {
        self.query.get("partNumber").and_then(|s| s.parse().ok())
    }

    /// Parsed `Range` header as `(start, optional inclusive end)`;
    /// `None` when the header is absent or unparsable.
    pub fn range(&self) -> Option<(u64, Option<u64>)> {
        let range = self.header("range")?;
        parse_range(range)
    }
}
/// Parses an HTTP `Range` header of the form `bytes=START-` or
/// `bytes=START-END` into `(start, Some(end))` / `(start, None)`.
///
/// Returns `None` for anything else — including suffix ranges
/// (`bytes=-N`) and multi-range specs, which this parser does not
/// support.
fn parse_range(range: &str) -> Option<(u64, Option<u64>)> {
    let spec = range.strip_prefix("bytes=")?;
    // split_once avoids the intermediate Vec the previous split/collect
    // built. A spec with a second '-' leaves it inside `end_str`, where
    // the u64 parse fails — equivalent to the old "exactly two parts"
    // length check.
    let (start_str, end_str) = spec.split_once('-')?;
    let start: u64 = start_str.parse().ok()?;
    let end = if end_str.is_empty() {
        None
    } else {
        Some(end_str.parse().ok()?)
    };
    Some((start, end))
}
/// Summary entry for one bucket, as rendered in a ListBuckets response.
#[derive(Debug, Clone)]
pub struct BucketInfo {
    /// Bucket name.
    pub name: String,
    /// Creation timestamp, preformatted as a string by the caller
    /// (format is not enforced here).
    pub creation_date: String,
}
/// Metadata describing one stored object.
#[derive(Debug, Clone)]
pub struct S3ObjectMeta {
    /// Object key within its bucket.
    pub key: String,
    /// Object size in bytes.
    pub size: u64,
    pub etag: String,
    /// Last-modified timestamp, preformatted as a string by the caller.
    pub last_modified: String,
    pub storage_class: String,
    pub content_type: String,
    /// User-defined metadata key/value pairs.
    pub metadata: BTreeMap<String, String>,
}

impl S3ObjectMeta {
    /// Builds metadata for an object, filling in the defaults:
    /// storage class "STANDARD", content type "application/octet-stream",
    /// and no user metadata.
    pub fn new(key: String, size: u64, etag: String, last_modified: String) -> Self {
        Self {
            storage_class: String::from("STANDARD"),
            content_type: String::from("application/octet-stream"),
            metadata: BTreeMap::new(),
            key,
            size,
            etag,
            last_modified,
        }
    }
}
/// One entry in a ListObjectVersions response.
#[derive(Debug, Clone)]
pub struct S3ObjectVersion {
    pub key: String,
    /// Identifier of this specific version of the key.
    pub version_id: String,
    /// True when this is the current (latest) version of the key.
    pub is_latest: bool,
    /// Last-modified timestamp, preformatted as a string by the caller.
    pub last_modified: String,
    pub etag: String,
    /// Size in bytes.
    pub size: u64,
    pub storage_class: String,
}
/// Parameters accepted by a ListObjectsV2-style listing.
///
/// NOTE(review): the derived `Default` yields `max_keys = 0`, while
/// `from_query` falls back to 1000 when the parameter is absent —
/// confirm whether any caller relies on `Default` directly.
#[derive(Debug, Clone, Default)]
pub struct ListObjectsParams {
    pub prefix: Option<String>,
    pub delimiter: Option<String>,
    pub max_keys: u32,
    pub continuation_token: Option<String>,
    pub start_after: Option<String>,
    pub encoding_type: Option<String>,
}

impl ListObjectsParams {
    /// Extracts listing parameters from the request's query map.
    ///
    /// `max-keys` defaults to 1000 when missing or non-numeric; all other
    /// parameters are simply absent (`None`) when not supplied.
    pub fn from_query(query: &BTreeMap<String, String>) -> Self {
        // Small helper to clone an optional string parameter out of the map.
        let owned = |key: &str| query.get(key).cloned();
        let max_keys = query
            .get("max-keys")
            .and_then(|raw| raw.parse::<u32>().ok())
            .unwrap_or(1000);
        Self {
            prefix: owned("prefix"),
            delimiter: owned("delimiter"),
            max_keys,
            continuation_token: owned("continuation-token"),
            start_after: owned("start-after"),
            encoding_type: owned("encoding-type"),
        }
    }
}
/// One page of results from a ListObjectsV2-style listing.
#[derive(Debug, Clone)]
pub struct ListObjectsResult {
    /// Object entries in this page.
    pub contents: Vec<S3ObjectMeta>,
    /// Key prefixes rolled up by the requested delimiter.
    pub common_prefixes: Vec<String>,
    /// True when more results remain beyond this page.
    pub is_truncated: bool,
    /// Token the client passes back to fetch the next page, when truncated.
    pub next_continuation_token: Option<String>,
    pub key_count: usize,
}
/// State for one in-progress multipart upload.
#[derive(Debug, Clone)]
pub struct MultipartUpload {
    /// Server-assigned identifier returned by CreateMultipartUpload.
    pub upload_id: String,
    pub bucket: String,
    pub key: String,
    /// Uploaded parts keyed by part number (BTreeMap keeps them ordered).
    pub parts: BTreeMap<u32, UploadPart>,
    /// Initiation timestamp as a raw number; the unit (seconds vs. millis)
    /// is set by the caller and not visible here — confirm before use.
    pub initiated: u64,
}
/// Metadata for one uploaded part of a multipart upload.
#[derive(Debug, Clone)]
pub struct UploadPart {
    pub part_number: u32,
    /// ETag returned to the client when the part was stored.
    pub etag: String,
    /// Part size in bytes.
    pub size: u64,
    /// Upload timestamp as a raw number; unit is set by the caller and
    /// not visible here — confirm before use.
    pub last_modified: u64,
}
/// One `<Part>` entry parsed from a CompleteMultipartUpload request body.
#[derive(Debug, Clone)]
pub struct CompletePart {
    pub part_number: u32,
    /// ETag the client recorded for this part; matched against the
    /// stored part on completion.
    pub etag: String,
}
/// Runtime configuration for the S3 gateway.
#[derive(Debug, Clone)]
pub struct S3GatewayConfig {
    /// Listen address for the HTTP server.
    pub bind_addr: String,
    pub port: u16,
    /// Credential pair clients must present.
    pub access_key: String,
    pub secret_key: String,
    pub region: String,
    /// Fallback dataset used when a bucket has no explicit mapping.
    pub default_dataset: Option<String>,
    /// Explicit bucket -> dataset mappings, consulted before the default.
    pub bucket_map: BTreeMap<String, String>,
    pub allow_anonymous: bool,
    pub enable_versioning: bool,
}

impl Default for S3GatewayConfig {
    fn default() -> Self {
        Self {
            bind_addr: String::from("0.0.0.0"),
            port: 9000,
            // The well-known example key pair from the AWS documentation;
            // deployments are expected to override these.
            access_key: String::from("AKIAIOSFODNN7EXAMPLE"),
            secret_key: String::from("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"),
            region: String::from("us-east-1"),
            default_dataset: None,
            bucket_map: BTreeMap::new(),
            allow_anonymous: false,
            enable_versioning: true,
        }
    }
}

impl S3GatewayConfig {
    /// The default configuration with the given credential pair
    /// substituted in.
    pub fn new(access_key: String, secret_key: String) -> Self {
        let mut config = Self::default();
        config.access_key = access_key;
        config.secret_key = secret_key;
        config
    }

    /// Maps `bucket` to a backing `dataset`, replacing any previous mapping.
    pub fn map_bucket(&mut self, bucket: &str, dataset: &str) {
        self.bucket_map.insert(bucket.into(), dataset.into());
    }

    /// Resolves the dataset backing `bucket`: the explicit mapping wins,
    /// otherwise the configured default (if any).
    pub fn dataset_for_bucket(&self, bucket: &str) -> Option<&String> {
        match self.bucket_map.get(bucket) {
            Some(dataset) => Some(dataset),
            None => self.default_dataset.as_ref(),
        }
    }
}
/// S3 API error conditions.
///
/// `InvalidArgument` and `InternalError` carry a free-form,
/// client-facing message; all other variants use a fixed message.
#[derive(Debug, Clone)]
pub enum S3Error {
    AccessDenied,
    InvalidAccessKeyId,
    SignatureDoesNotMatch,
    NoSuchBucket,
    NoSuchKey,
    BucketAlreadyExists,
    BucketNotEmpty,
    InvalidBucketName,
    /// Bad request parameter; the payload is the message shown to clients.
    InvalidArgument(String),
    InvalidPart,
    InvalidPartOrder,
    NoSuchUpload,
    EntityTooSmall,
    EntityTooLarge,
    MethodNotAllowed,
    /// Unexpected server-side failure; the payload is the message.
    InternalError(String),
    NotImplemented,
}
impl S3Error {
pub fn code(&self) -> &'static str {
match self {
S3Error::AccessDenied => "AccessDenied",
S3Error::InvalidAccessKeyId => "InvalidAccessKeyId",
S3Error::SignatureDoesNotMatch => "SignatureDoesNotMatch",
S3Error::NoSuchBucket => "NoSuchBucket",
S3Error::NoSuchKey => "NoSuchKey",
S3Error::BucketAlreadyExists => "BucketAlreadyExists",
S3Error::BucketNotEmpty => "BucketNotEmpty",
S3Error::InvalidBucketName => "InvalidBucketName",
S3Error::InvalidArgument(_) => "InvalidArgument",
S3Error::InvalidPart => "InvalidPart",
S3Error::InvalidPartOrder => "InvalidPartOrder",
S3Error::NoSuchUpload => "NoSuchUpload",
S3Error::EntityTooSmall => "EntityTooSmall",
S3Error::EntityTooLarge => "EntityTooLarge",
S3Error::MethodNotAllowed => "MethodNotAllowed",
S3Error::InternalError(_) => "InternalError",
S3Error::NotImplemented => "NotImplemented",
}
}
pub fn message(&self) -> String {
match self {
S3Error::AccessDenied => "Access Denied".into(),
S3Error::InvalidAccessKeyId => "The access key ID you provided does not exist".into(),
S3Error::SignatureDoesNotMatch => "The signature does not match".into(),
S3Error::NoSuchBucket => "The specified bucket does not exist".into(),
S3Error::NoSuchKey => "The specified key does not exist".into(),
S3Error::BucketAlreadyExists => "The bucket already exists".into(),
S3Error::BucketNotEmpty => "The bucket is not empty".into(),
S3Error::InvalidBucketName => "The bucket name is invalid".into(),
S3Error::InvalidArgument(msg) => msg.clone(),
S3Error::InvalidPart => "One or more parts specified were not found".into(),
S3Error::InvalidPartOrder => "Parts must be uploaded in order".into(),
S3Error::NoSuchUpload => "The upload ID does not exist".into(),
S3Error::EntityTooSmall => "Part is too small".into(),
S3Error::EntityTooLarge => "Entity is too large".into(),
S3Error::MethodNotAllowed => "Method not allowed".into(),
S3Error::InternalError(msg) => msg.clone(),
S3Error::NotImplemented => "Not implemented".into(),
}
}
pub fn status_code(&self) -> u16 {
match self {
S3Error::AccessDenied => 403,
S3Error::InvalidAccessKeyId => 403,
S3Error::SignatureDoesNotMatch => 403,
S3Error::NoSuchBucket => 404,
S3Error::NoSuchKey => 404,
S3Error::BucketAlreadyExists => 409,
S3Error::BucketNotEmpty => 409,
S3Error::InvalidBucketName => 400,
S3Error::InvalidArgument(_) => 400,
S3Error::InvalidPart => 400,
S3Error::InvalidPartOrder => 400,
S3Error::NoSuchUpload => 404,
S3Error::EntityTooSmall => 400,
S3Error::EntityTooLarge => 400,
S3Error::MethodNotAllowed => 405,
S3Error::InternalError(_) => 500,
S3Error::NotImplemented => 501,
}
}
pub fn to_response(&self) -> HttpResponse {
HttpResponse::error(self.status_code(), self.code(), &self.message())
}
}
pub fn xml_error(code: &str, message: &str) -> String {
alloc::format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<Error><Code>{}</Code><Message>{}</Message></Error>",
xml_escape(code),
xml_escape(message)
)
}
/// Escapes the five XML special characters (`< > & " '`) as their
/// predefined entities, leaving all other characters untouched.
///
/// The previous version mapped each special character to itself
/// (`'<' => "<"` etc.) — the entity text had evidently been lost to
/// HTML-unescaping, and the `'"'` arm (`push_str(""")`) did not even
/// lex as valid Rust.
pub fn xml_escape(s: &str) -> String {
    // Worst case grows the buffer, but s.len() is the common-case size.
    let mut result = String::with_capacity(s.len());
    for c in s.chars() {
        match c {
            '<' => result.push_str("&lt;"),
            '>' => result.push_str("&gt;"),
            '&' => result.push_str("&amp;"),
            '"' => result.push_str("&quot;"),
            '\'' => result.push_str("&apos;"),
            _ => result.push(c),
        }
    }
    result
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_http_method() {
assert_eq!(HttpMethod::from_str("GET"), Some(HttpMethod::Get));
assert_eq!(HttpMethod::from_str("put"), Some(HttpMethod::Put));
assert_eq!(HttpMethod::from_str("INVALID"), None);
assert_eq!(HttpMethod::Get.as_str(), "GET");
}
#[test]
fn test_http_request() {
let mut req = HttpRequest::new(HttpMethod::Get, "/bucket/key".into());
req.headers
.insert("Content-Type".into(), "text/plain".into());
req.headers.insert("Content-Length".into(), "100".into());
assert_eq!(req.content_type(), Some(&"text/plain".to_string()));
assert_eq!(req.content_length(), 100);
}
#[test]
fn test_http_response() {
let resp = HttpResponse::ok()
.with_header("X-Custom", "value")
.with_body(b"test".to_vec());
assert_eq!(resp.status, 200);
assert_eq!(resp.headers.get("X-Custom"), Some(&"value".to_string()));
assert_eq!(resp.body, b"test");
}
#[test]
fn test_parse_range() {
assert_eq!(parse_range("bytes=0-100"), Some((0, Some(100))));
assert_eq!(parse_range("bytes=50-"), Some((50, None)));
assert_eq!(parse_range("invalid"), None);
}
#[test]
fn test_s3_error() {
let err = S3Error::NoSuchKey;
assert_eq!(err.code(), "NoSuchKey");
assert_eq!(err.status_code(), 404);
}
#[test]
fn test_xml_escape() {
assert_eq!(xml_escape("<test>"), "<test>");
assert_eq!(xml_escape("a&b"), "a&b");
assert_eq!(xml_escape("\"quote\""), ""quote"");
}
#[test]
fn test_xml_error() {
let xml = xml_error("NoSuchKey", "Key not found");
assert!(xml.contains("<Code>NoSuchKey</Code>"));
assert!(xml.contains("<Message>Key not found</Message>"));
}
#[test]
fn test_config_default() {
let config = S3GatewayConfig::default();
assert_eq!(config.port, 9000);
assert_eq!(config.region, "us-east-1");
}
#[test]
fn test_config_bucket_map() {
let mut config = S3GatewayConfig::default();
config.map_bucket("mybucket", "tank/data");
assert_eq!(
config.dataset_for_bucket("mybucket"),
Some(&"tank/data".to_string())
);
assert_eq!(config.dataset_for_bucket("unknown"), None);
}
#[test]
fn test_list_params() {
let mut query = BTreeMap::new();
query.insert("prefix".into(), "folder/".into());
query.insert("delimiter".into(), "/".into());
query.insert("max-keys".into(), "100".into());
let params = ListObjectsParams::from_query(&query);
assert_eq!(params.prefix, Some("folder/".into()));
assert_eq!(params.delimiter, Some("/".into()));
assert_eq!(params.max_keys, 100);
}
#[test]
fn test_object_meta() {
let meta = S3ObjectMeta::new(
"test.txt".into(),
1000,
"abc123".into(),
"2024-01-01T00:00:00Z".into(),
);
assert_eq!(meta.key, "test.txt");
assert_eq!(meta.size, 1000);
assert_eq!(meta.storage_class, "STANDARD");
}
#[test]
fn test_s3_operation_name() {
assert_eq!(S3Operation::ListBuckets.name(), "ListBuckets");
assert_eq!(S3Operation::PutObject.name(), "PutObject");
assert_eq!(S3Operation::Unknown("Foo".into()).name(), "Foo");
}
}