//! Enhanced model details functionality tests for `api_ollama` crate.
//!
//! These tests verify the comprehensive model details system that provides
//! detailed model metadata, local model management, model pull/push operations,
//! and comprehensive model information capabilities.
#![ cfg( feature = "model_details" ) ]
#![ allow( clippy::std_instead_of_core ) ] // async/futures require std
use api_ollama::{ OllamaClient, EnhancedModelDetails, ModelMetadata, ModelLifecycle, ModelOperation };
use api_ollama::{ PullModelRequest, PushModelRequest, ShowModelRequest };
use core::time::Duration;
use std::sync::Arc;
use tokio::time::Instant;
use futures_util::StreamExt;
#[ tokio::test ]
async fn test_enhanced_model_details_structure_and_creation()
{
  // Assemble the metadata and performance metrics up front, then feed them
  // through the builder-style constructor of `EnhancedModelDetails`.
  let metadata = ModelMetadata {
    name: "llama2".to_string(),
    tag: "7b".to_string(),
    size: 3_800_000_000,
    digest: "sha256:abc123".to_string(),
    modified_at: "2024-01-15T10:30:00Z".to_string(),
    format: "gguf".to_string(),
    family: "llama".to_string(),
    families: vec!["llama".to_string(), "transformer".to_string()],
    parameter_size: "7B".to_string(),
    quantization_level: "q4_0".to_string(),
    architecture: "LlamaForCausalLM".to_string(),
    license: "Apache 2.0".to_string(),
    template: "{{.System}}\n{{.Prompt}}".to_string(),
    system_prompt: None,
    parameters: std::collections::HashMap::new(),
  };
  let metrics = api_ollama::ModelPerformanceMetrics {
    average_tokens_per_second: 25.5,
    peak_memory_usage: 4_200_000_000,
    last_inference_time: Duration::from_millis(450),
    total_inference_count: 156,
  };
  let model_details = EnhancedModelDetails::new("llama2:7b")
    .with_metadata(metadata)
    .with_lifecycle_status(ModelLifecycle::Ready)
    .with_download_progress(100.0)
    .with_performance_metrics(metrics);
  // Every accessor must reflect exactly the values supplied above.
  assert_eq!(model_details.name(), "llama2:7b");
  assert_eq!(model_details.metadata().size, 3_800_000_000);
  assert_eq!(model_details.lifecycle_status(), &ModelLifecycle::Ready);
  assert!((model_details.download_progress() - 100.0).abs() < f64::EPSILON);
  assert!(model_details.performance_metrics().is_some());
}
#[ tokio::test ]
async fn test_detailed_model_metadata_retrieval()
{
  let client = OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() );
  let model_name = "llama2:7b";
  // Request full verbosity so the response carries template, parameter, and
  // system-prompt information alongside the base metadata.
  let show_request = ShowModelRequest::new(model_name)
    .with_verbose(true)
    .with_template_info(true)
    .with_parameters_info(true)
    .with_system_info(true);
  match client.show_model_detailed(show_request).await
  {
    Ok(enhanced_details) =>
    {
      assert_eq!(enhanced_details.name(), model_name);
      let meta = enhanced_details.metadata();
      assert!(meta.size > 0);
      assert!(!meta.digest.is_empty());
      assert!(!meta.format.is_empty());
      assert!(!meta.family.is_empty());
      assert!(!meta.parameter_size.is_empty());
      assert!(meta.template.contains("{{"));
      assert_eq!(enhanced_details.lifecycle_status(), &ModelLifecycle::Ready);
    },
    Err(_) =>
    {
      // Expected failure until enhanced model details are implemented.
    },
  }
}
#[ tokio::test ]
async fn test_local_model_management_capabilities()
{
  let client = OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() );
  // Enumerate local models with enhanced details and sanity-check each entry.
  match client.list_models_detailed().await
  {
    Ok(models) =>
    {
      for model in models
      {
        assert!(!model.name().is_empty());
        assert!(model.metadata().size > 0);
        assert!(!model.metadata().digest.is_empty());
        assert!(matches!(model.lifecycle_status(),
          ModelLifecycle::Ready | ModelLifecycle::Loading | ModelLifecycle::Downloading));
      }
    },
    Err(_) =>
    {
      // Expected failure until enhanced model listing is implemented.
    },
  }
  // The local-existence probe returns a plain boolean, so any value is valid.
  match client.model_exists_locally("llama2:7b").await
  {
    Ok(exists) => { let _ = exists; },
    Err(_) =>
    {
      // Expected failure until the model-existence check is implemented.
    },
  }
  // Storage info should report non-empty, non-zero figures when available.
  match client.get_local_model_storage_info().await
  {
    Ok(info) =>
    {
      assert!(info.total_models > 0);
      assert!(info.total_size_bytes > 0);
      assert!(!info.storage_path.is_empty());
      assert!(info.available_space_bytes > 0);
    },
    Err(_) =>
    {
      // Expected failure until model storage info is implemented.
    },
  }
}
#[ tokio::test ]
async fn test_model_pull_operations_with_progress()
{
  let client = OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() );
  let pull_request = PullModelRequest::new("tinyllama:1.1b")
    .with_insecure(false)
    .with_progress_tracking(true);
  // Pull with progress monitoring; consume only a few updates so the test
  // never downloads an entire model.
  match client.pull_model_with_progress(pull_request).await
  {
    Ok(mut progress_stream) =>
    {
      let mut progress_updates = 0;
      while let Some(progress) = progress_stream.next().await
      {
        // Stop consuming the stream on the first error.
        let Ok(update) = progress else { break };
        // `completed` is u64 and therefore always non-negative; just touch it.
        let _ = update.completed;
        assert!(update.total > 0);
        assert!(update.completed <= update.total);
        progress_updates += 1;
        if progress_updates >= 3 { break; }
      }
      assert!(progress_updates > 0);
    },
    Err(_) =>
    {
      // Expected failure until pull-with-progress is implemented.
    },
  }
  // Completion verification yields a plain boolean, so any value is valid.
  match client.verify_model_pull_completion("tinyllama:1.1b").await
  {
    Ok(is_complete) => { let _ = is_complete; },
    Err(_) =>
    {
      // Expected failure until pull verification is implemented.
    },
  }
}
#[ tokio::test ]
async fn test_model_push_operations()
{
  let client = OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() );
  let push_request = PushModelRequest::new("custom-model:latest")
    .with_insecure(false)
    .with_progress_tracking(true);
  // Push with progress monitoring, consuming at most two updates to keep the
  // test fast.
  match client.push_model_with_progress(push_request).await
  {
    Ok(mut progress_stream) =>
    {
      let mut progress_updates = 0;
      while let Some(progress) = progress_stream.next().await
      {
        // Stop consuming the stream on the first error.
        let Ok(update) = progress else { break };
        // `completed` is u64 and therefore always non-negative; just touch it.
        let _ = update.completed;
        assert!(update.total > 0);
        progress_updates += 1;
        if progress_updates >= 2 { break; }
      }
    },
    Err(_) =>
    {
      // Expected failure until push-with-progress is implemented.
    },
  }
}
#[ tokio::test ]
async fn test_model_size_family_parameter_information()
{
  let client = OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() );
  // Retrieve comprehensive model information and validate each facet.
  let model_info_result = client.get_comprehensive_model_info("llama2:7b").await;
  if let Ok(info) = model_info_result
  {
    // Size information: both raw bytes and a human-readable rendering.
    assert!(info.size_bytes > 0);
    assert!(info.size_human_readable.contains("GB") || info.size_human_readable.contains("MB"));
    // Family information: the primary family must appear in the family list.
    assert!(!info.family.is_empty());
    assert!(!info.families.is_empty());
    assert!(info.families.contains(&info.family));
    // Parameter information.
    assert!(!info.parameter_size.is_empty());
    assert!(info.parameter_size.contains('B')); // Should contain 'B' for billions/millions
    assert!(info.parameter_count > 0);
    // Architecture details.
    assert!(!info.architecture.is_empty());
    assert!(!info.quantization_level.is_empty());
    assert!(!info.format.is_empty());
    // Model capabilities.
    assert!(!info.supported_features.is_empty());
    assert!(info.context_length > 0);
    assert!(info.max_sequence_length > 0);
  } else {
    // Expected failure until comprehensive model info is implemented.
  }
  // Family-based recommendations must carry a model name, a reason, and a
  // similarity score normalized to [0, 1].
  let recommendations_result = client.get_model_family_recommendations("llama2:7b").await;
  if let Ok(recommendations) = recommendations_result
  {
    assert!(!recommendations.is_empty());
    for rec in recommendations
    {
      assert!(!rec.model_name.is_empty());
      assert!(!rec.reason.is_empty());
      // Idiomatic range check (avoids clippy::manual_range_contains).
      assert!((0.0..=1.0).contains(&rec.similarity_score));
    }
  } else {
    // Expected failure until family recommendations are implemented.
  }
}
#[ tokio::test ]
async fn test_model_lifecycle_and_status_tracking()
{
  let client = OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() );
  // Lifecycle status tracking: state must be one of the known variants.
  let lifecycle_result = client.get_model_lifecycle_status("llama2:7b").await;
  if let Ok(status) = lifecycle_result
  {
    assert!(matches!(status.current_state,
      ModelLifecycle::Ready | ModelLifecycle::Loading | ModelLifecycle::Downloading |
      ModelLifecycle::Error | ModelLifecycle::NotFound));
    if let Some(last_used) = status.last_used_at
    {
      assert!(!last_used.is_empty());
    }
    if let Some(loading_time) = status.last_loading_duration
    {
      // A recorded loading duration must be strictly positive.
      assert!(loading_time > Duration::ZERO);
    }
    // Verify usage_count field exists (u64 is always >= 0).
    let _ = status.usage_count;
  } else {
    // Expected failure until lifecycle tracking is implemented.
  }
  // Operation history: each entry carries a known operation type and timestamp.
  let operation_history = client.get_model_operation_history("llama2:7b", 10).await;
  if let Ok(history) = operation_history
  {
    for operation in history
    {
      assert!(matches!(operation.operation_type,
        ModelOperation::Pull | ModelOperation::Push | ModelOperation::Delete |
        ModelOperation::Load | ModelOperation::Unload | ModelOperation::Inference));
      assert!(!operation.timestamp.is_empty());
      // `duration >= Duration::ZERO` is tautologically true (Duration is
      // unsigned), so no assertion is made — just verify the field exists.
      let _ = operation.duration;
    }
  } else {
    // Expected failure until operation history is implemented.
  }
  // Health check: score normalized to [0, 1] with an optional issue list.
  let health_check = client.perform_model_health_check("llama2:7b").await;
  if let Ok(health) = health_check
  {
    // `response_time >= Duration::ZERO` always holds (unsigned), so just
    // verify the field exists; health.is_available is likewise always valid.
    let _ = health.response_time;
    assert!((0.0..=1.0).contains(&health.health_score));
    if let Some(issues) = health.issues
    {
      for issue in issues
      {
        assert!(!issue.is_empty());
      }
    }
  } else {
    // Expected failure until model health check is implemented.
  }
}
#[ tokio::test ]
async fn test_model_details_performance_benchmarks()
{
  let client = OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() );
  // Number of timed retrievals. Declared as u32 so the Duration division
  // below is direct — the previous `u32::try_from(n).unwrap_or(1)` would
  // have silently reported the TOTAL as the average had conversion failed.
  const ITERATIONS: u32 = 10;
  let mut retrieval_times = Vec::with_capacity(ITERATIONS as usize);
  for _ in 0..ITERATIONS
  {
    let start = Instant::now();
    let _ = client.show_model_detailed(ShowModelRequest::new("llama2:7b")).await;
    retrieval_times.push(start.elapsed());
  }
  // Aggregate performance metrics.
  let total_time: Duration = retrieval_times.iter().sum();
  let average_time = total_time / ITERATIONS;
  // ITERATIONS > 0, so the sample vector is never empty.
  let min_time = retrieval_times.iter().min().expect("at least one timing sample");
  let max_time = retrieval_times.iter().max().expect("at least one timing sample");
  // Performance assertions.
  assert!(average_time < Duration::from_millis(500), "Average retrieval time too slow: {average_time:?}");
  assert!(*min_time < Duration::from_millis(200), "Min retrieval time too slow: {min_time:?}");
  assert!(*max_time < Duration::from_secs(2), "Max retrieval time too slow: {max_time:?}");
  println!("Model details retrieval performance:");
  println!("  Average: {average_time:?}");
  println!("  Min: {min_time:?}");
  println!("  Max: {max_time:?}");
  // Benchmark model listing performance.
  let list_start = Instant::now();
  let _ = client.list_models_detailed().await;
  let list_duration = list_start.elapsed();
  assert!(list_duration < Duration::from_secs(3), "Model listing too slow: {list_duration:?}");
  println!("Model listing performance: {list_duration:?}");
}
#[ tokio::test ]
async fn test_model_details_concurrent_access()
{
let client = Arc::new(OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() ));
let mut handles = vec![];
// Spawn multiple concurrent model details requests
for i in 0..5
{
let client = client.clone();
let handle = tokio::spawn(async move {
let model_name = if i % 2 == 0 { "llama2:7b" } else { "tinyllama:1.1b" };
let result = client.show_model_detailed(ShowModelRequest::new(model_name)).await;
(i, result.is_ok())
});
handles.push(handle);
}
// Wait for all requests to complete
let mut successful_requests = 0;
for handle in handles
{
if let Ok((_, success)) = handle.await
{
if success
{
successful_requests += 1;
}
}
}
// At least some requests should succeed if models exist
// Or all should fail consistently if features not implemented
println!("Successful concurrent requests: {successful_requests}/5");
}
#[ tokio::test ]
async fn test_model_details_error_handling()
{
  let client = OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() );
  // A non-existent model must yield an error whose text identifies either
  // the missing model or the missing implementation.
  let nonexistent_result = client
    .show_model_detailed(ShowModelRequest::new("nonexistent-model:999"))
    .await;
  match nonexistent_result
  {
    Ok(_) => panic!("Should not succeed for non-existent model"),
    Err(error) =>
    {
      let error_message = format!("{error:?}");
      let acceptable = ["not found", "404", "NotFound", "implementation"];
      assert!(acceptable.iter().any(|needle| error_message.contains(needle)));
    },
  }
  // An empty model name must be rejected.
  let invalid_format_result = client
    .show_model_detailed(ShowModelRequest::new(""))
    .await;
  assert!(invalid_format_result.is_err(), "Should fail for empty model name");
  // A malformed model name must be rejected.
  let malformed_result = client.get_comprehensive_model_info("invalid::model::name").await;
  assert!(malformed_result.is_err(), "Should fail for malformed model name");
}
#[ tokio::test ]
async fn test_model_details_integration_with_client()
{
  let client = OllamaClient::new( "http://localhost:11434".to_string(), OllamaClient::recommended_timeout_fast() )
    .with_model_details_caching(true)
    .with_model_lifecycle_tracking(true);
  // The builder flags above must enable the model-details feature set.
  assert!(client.has_model_details_features());
  // Two consecutive cached lookups must agree: either both succeed with the
  // same model name, or both fail (feature unimplemented). Mixed outcomes
  // indicate broken caching.
  let cache_result1 = client.get_cached_model_details("llama2:7b").await;
  let cache_result2 = client.get_cached_model_details("llama2:7b").await;
  match (cache_result1, cache_result2)
  {
    (Ok(details1), Ok(details2)) => assert_eq!(details1.name(), details2.name()),
    (Err(_), Err(_)) =>
    {
      // Expected failure until model-details caching is implemented.
    },
    _ => panic!("Inconsistent caching behavior"),
  }
  // Details retrieval combined with diagnostics should report activity.
  if let Ok((details, diagnostics)) = client.get_model_details_with_diagnostics("llama2:7b").await
  {
    assert!(!details.name().is_empty());
    assert!(diagnostics.request_count > 0);
  }
  else
  {
    // Expected failure until diagnostics integration is implemented.
  }
}