//! Machine-learning model I/O module root.
//!
//! Declares the submodules of this subsystem and re-exports their primary
//! public types so callers can use `use <this_module>::{...}` without naming
//! individual submodules. Based on the visible re-exports, the subsystem
//! covers: batch processing / data loading, framework format converters
//! (ONNX, PyTorch, TensorFlow, SafeTensors, CoreML, HuggingFace, JAX, MXNet),
//! datasets, model optimization, quantization, model serving, shared tensor/
//! model types, and validation.
//!
//! NOTE(review): the two blanket `allow`s below suppress dead-code and
//! missing-docs lints for the entire subtree. Presumably this module is
//! under active development — confirm intent, and consider scoping or
//! removing them once the API stabilizes.
#![allow(dead_code)]
#![allow(missing_docs)]

// --- Submodule declarations -------------------------------------------------
pub mod batch_processing;
pub mod converters;
pub mod datasets;
pub mod optimization;
pub mod quantization;
pub mod serving;
pub mod types;
pub mod utils;
pub mod validation;

// --- Public re-exports ------------------------------------------------------
// Flatten each submodule's primary API into this module's namespace.
pub use batch_processing::{BatchProcessor, DataLoader};
// Per-framework converters plus the `MLFrameworkConverter` trait/interface
// and a `get_converter` factory-style lookup.
pub use converters::{
get_converter, CoreMLConverter, HuggingFaceConverter, JAXConverter, MLFrameworkConverter,
MXNetConverter, ONNXConverter, PyTorchConverter, SafeTensorsConverter, TensorFlowConverter,
};
pub use datasets::MLDataset;
pub use optimization::{ModelOptimizer, OptimizationTechnique};
pub use quantization::{ModelQuantizer, QuantizationMethod, QuantizedModel, QuantizedTensor};
// Model-serving surface: request/response types, server, load balancing,
// health/metrics reporting.
pub use serving::{
ApiConfig, HealthStatus, InferenceRequest, InferenceResponse, LoadBalancer, ModelInfo,
ModelServer, ResponseStatus, ServerConfig, ServerMetrics,
};
pub use types::{DataType, MLFramework, MLModel, MLTensor, ModelMetadata, TensorMetadata};
pub use validation::{BatchValidator, ModelValidator, ValidationConfig, ValidationReport};

// --- Imports shared by code in this module ----------------------------------
// NOTE(review): `utils` has no re-export above, unlike every other submodule —
// confirm whether that is intentional or an omission.
use crate::error::{IoError, Result};
use scirs2_core::ndarray::{Array2, ArrayD, ArrayView2, IxDyn};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
use std::path::{Path, PathBuf};
// Async-only dependencies, compiled in only when the "async" feature is on.
#[cfg(feature = "async")]
use tokio::{
fs,
sync::{Mutex, RwLock},
time::{sleep, Duration, Instant},
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
// std type, but gated to the async feature — presumably only the async code
// paths use a queue; confirm before ungating.
#[cfg(feature = "async")]
use std::collections::VecDeque;