// lcpfs 2026.1.102
//
// LCP File System - A ZFS-inspired copy-on-write filesystem for Rust
// Copyright 2025 LunaOS Contributors
// SPDX-License-Identifier: Apache-2.0

//! # S3 Gateway
//!
//! Native S3-compatible API gateway for LCPFS. Exposes datasets as S3 buckets
//! with full support for common S3 operations.
//!
//! ## Overview
//!
//! The S3 Gateway provides a minimal, standards-compliant S3 interface that allows
//! existing S3-compatible tools and applications to interact with LCPFS storage.
//! Each LCPFS dataset can be exposed as an S3 bucket with versioning backed by
//! snapshots.
//!
//! ## Features
//!
//! - **Bucket Operations**: ListBuckets, CreateBucket, DeleteBucket, HeadBucket
//! - **Object Operations**: PutObject, GetObject, DeleteObject, HeadObject, CopyObject
//! - **Multipart Uploads**: CreateMultipartUpload, UploadPart, CompleteMultipartUpload
//! - **Listing**: ListObjectsV2 with prefix/delimiter support
//! - **Versioning**: Object versions via LCPFS snapshots
//! - **Authentication**: AWS Signature Version 4 (SigV4)
//!
//! ## Architecture
//!
//! ```text
//! ┌─────────────────────────────────────────────────────────────┐
//! │                      S3 Gateway                             │
//! │   ┌─────────────┐  ┌─────────────┐  ┌─────────────────┐    │
//! │   │ HTTP Server │──│ S3 Parser   │──│ SigV4 Auth      │    │
//! │   └─────────────┘  └─────────────┘  └─────────────────┘    │
//! │          │                                   │              │
//! │          ▼                                   ▼              │
//! │   ┌─────────────────────────────────────────────────────┐  │
//! │   │              S3 Operations Handler                  │  │
//! │   │  ┌─────────┐  ┌─────────┐  ┌─────────┐  ┌────────┐ │  │
//! │   │  │ Buckets │  │ Objects │  │Multipart│  │  XML   │ │  │
//! │   │  └─────────┘  └─────────┘  └─────────┘  └────────┘ │  │
//! │   └─────────────────────────────────────────────────────┘  │
//! │                            │                                │
//! └────────────────────────────┼────────────────────────────────┘
//!
//! ┌─────────────────────────────────────────────────────────────┐
//! │                    StorageProvider                          │
//! │   (Maps S3 operations to LCPFS datasets/snapshots/files)   │
//! └─────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Usage
//!
//! ```rust,ignore
//! use lcpfs::s3::{S3Gateway, S3GatewayConfig, StorageProvider, NetworkProvider};
//!
//! // Implement StorageProvider for your LCPFS pool
//! struct LcpfsStorage { /* pool reference */ }
//! impl StorageProvider for LcpfsStorage {
//!     // Map buckets to datasets, objects to files
//! }
//!
//! // Implement NetworkProvider for your network stack
//! struct MyNetwork { /* ... */ }
//! impl NetworkProvider for MyNetwork {
//!     // Provide TCP listener
//! }
//!
//! // Create and run the gateway
//! let config = S3GatewayConfig {
//!     bind_addr: [0, 0, 0, 0],
//!     port: 9000,
//!     access_key: "minioadmin".into(),
//!     secret_key: "minioadmin".into(),
//!     region: "us-east-1".into(),
//!     ..Default::default()
//! };
//!
//! let gateway = S3Gateway::new(storage, network, config);
//! gateway.run()?;
//! ```
//!
//! ## AWS CLI Compatibility
//!
//! ```bash
//! # Configure AWS CLI to use the gateway
//! aws configure set aws_access_key_id minioadmin
//! aws configure set aws_secret_access_key minioadmin
//!
//! # List buckets
//! aws --endpoint-url http://localhost:9000 s3 ls
//!
//! # Upload a file
//! aws --endpoint-url http://localhost:9000 s3 cp file.txt s3://mybucket/
//!
//! # List objects
//! aws --endpoint-url http://localhost:9000 s3 ls s3://mybucket/
//! ```
//!
//! ## MinIO Client Compatibility
//!
//! ```bash
//! # Configure mc alias
//! mc alias set lcpfs http://localhost:9000 minioadmin minioadmin
//!
//! # Basic operations
//! mc ls lcpfs
//! mc mb lcpfs/newbucket
//! mc cp file.txt lcpfs/newbucket/
//! ```

mod auth;
mod gateway;
mod http;
mod ops;
mod parser;
mod types;
mod xml;

// Re-export core types
pub use auth::{AuthResult, verify_signature};
pub use gateway::{GatewayError, S3Gateway};
pub use http::{
    HttpConnection, HttpParseError, IoError, NetworkProvider, TcpListenerTrait, TcpStreamTrait,
    parse_request, write_response,
};
pub use ops::{DatasetInfo, FileInfo, S3Ops, StorageProvider, VersionInfo};
pub use parser::{extract_bucket_from_host, normalize_request, parse_s3_request};
pub use types::{
    BucketInfo, CompletePart, HttpMethod, HttpRequest, HttpResponse, ListObjectsParams,
    ListObjectsResult, MultipartUpload, S3Error, S3GatewayConfig, S3ObjectMeta, S3ObjectVersion,
    S3Operation, S3Request, UploadPart,
};
pub use xml::{
    bucket_location_xml, bucket_versioning_xml, complete_multipart_xml, compute_etag,
    copy_object_xml, delete_objects_xml, format_timestamp, initiate_multipart_xml,
    list_buckets_xml, list_multipart_uploads_xml, list_object_versions_xml, list_objects_v2_xml,
    list_parts_xml, parse_complete_multipart_parts, parse_delete_objects,
};

#[cfg(test)]
mod tests {
    use super::*;
    use alloc::string::ToString;

    /// Smoke test: the re-exported core types must be reachable from this
    /// module root, and the default config must carry the documented
    /// MinIO-style defaults (port 9000, region us-east-1).
    #[test]
    fn test_module_exports() {
        // Touch one variant of each re-exported enum to prove visibility.
        let _ = HttpMethod::Get;
        let _ = S3Operation::ListBuckets;
        let _ = S3Error::NoSuchBucket;

        let cfg = S3GatewayConfig::default();
        assert_eq!(cfg.region, "us-east-1");
        assert_eq!(cfg.port, 9000);
    }

    /// A bare `GET /` with no query, headers, or body is the canonical
    /// ListBuckets request; verify the re-exported parser classifies it so.
    #[test]
    fn test_parse_s3_request_export() {
        use alloc::collections::BTreeMap;

        let request = HttpRequest {
            method: HttpMethod::Get,
            path: "/".to_string(),
            query: BTreeMap::new(),
            headers: BTreeMap::new(),
            body: alloc::vec::Vec::new(),
        };

        let parsed = parse_s3_request(&request);
        assert_eq!(parsed.operation, S3Operation::ListBuckets);
    }

    /// The re-exported XML builder should produce a ListAllMyBucketsResult
    /// document embedding the owner id.
    #[test]
    fn test_xml_exports() {
        let rendered = list_buckets_xml("owner123", "Owner Name", &[]);
        assert!(rendered.contains("owner123"));
        assert!(rendered.contains("<ListAllMyBucketsResult"));
    }
}