use anyhow::Result;
use clap::Subcommand;
use crate::config::Config;
pub mod analysis;
pub mod args;
pub mod benchmarking;
pub mod conversion;
pub mod enhanced_profiling;
pub mod enhanced_serialization;
pub mod optimization;
pub mod profiling;
pub mod pytorch_parser;
pub mod real_benchmarking;
pub mod serialization;
pub mod tensor_integration;
pub mod types;
pub mod validation;
pub use args::*;
// Subcommands of the `model` CLI group; each variant wraps its own clap args
// struct (re-exported from `args`) and is dispatched by `execute` below.
//
// NOTE(review): plain `//` comments are used deliberately — with
// `#[derive(Subcommand)]`, clap turns `///` doc comments into user-visible
// help text, so adding doc comments here would change CLI output.
#[derive(Subcommand)]
pub enum ModelCommands {
// Convert a model between formats (handled by `conversion`).
Convert(ConvertArgs),
// Apply optimization passes to a model (handled by `optimization`).
Optimize(OptimizeArgs),
// Quantize model weights (handled by `optimization`).
Quantize(QuantizeArgs),
// Prune model weights/structure (handled by `optimization`).
Prune(PruneArgs),
// Inspect a model's structure/metadata (handled by `analysis`).
Inspect(InspectArgs),
// Validate a model file (handled by `analysis`).
Validate(ValidateArgs),
// Benchmark model performance (handled by `benchmarking`).
Benchmark(BenchmarkArgs),
// Compress a model artifact (handled by `conversion`).
Compress(CompressArgs),
// Extract contents from a model artifact (handled by `conversion`).
Extract(ExtractArgs),
// Merge multiple models into one (handled by `conversion`).
Merge(MergeArgs),
}
pub async fn execute(cmd: ModelCommands, config: &Config, output_format: &str) -> Result<()> {
match cmd {
ModelCommands::Convert(args) => {
conversion::convert_model(args, config, output_format).await
}
ModelCommands::Optimize(args) => {
optimization::optimize_model(args, config, output_format).await
}
ModelCommands::Quantize(args) => {
optimization::quantize_model(args, config, output_format).await
}
ModelCommands::Prune(args) => optimization::prune_model(args, config, output_format).await,
ModelCommands::Inspect(args) => analysis::inspect_model(args, config, output_format).await,
ModelCommands::Validate(args) => {
analysis::validate_model(args, config, output_format).await
}
ModelCommands::Benchmark(args) => {
benchmarking::benchmark_model(args, config, output_format).await
}
ModelCommands::Compress(args) => {
conversion::compress_model(args, config, output_format).await
}
ModelCommands::Extract(args) => {
conversion::extract_model(args, config, output_format).await
}
ModelCommands::Merge(args) => conversion::merge_model(args, config, output_format).await,
}
}