#![ cfg( all( feature = "embeddings", feature = "integration_tests" ) ) ]
mod server_helpers;
use api_ollama::{ OllamaClient, EmbeddingsRequest };
use core::time::Duration;
#[ tokio::test ]
async fn test_embeddings_basic()
{
  with_test_server!(|mut client : OllamaClient, model : String| async move {
    // Happy-path smoke test : one short prompt, no generation options.
    let request = EmbeddingsRequest
    {
      model,
      prompt : "Hello world".to_string(),
      options : None,
    };
    let embeddings = client.embeddings(request).await
    .expect("Embeddings API call should succeed - test server is running");
    // A non-empty vector already implies positive dimensionality, so a single
    // assertion suffices (the original asserted the same condition twice).
    assert!(!embeddings.embedding.is_empty(), "Embeddings should not be empty");
    println!( "✓ Embeddings dimensions : {}", embeddings.embedding.len() );
    println!( "✓ Basic embeddings generation successful" );
  });
}
#[ tokio::test ]
async fn test_embeddings_multiple_prompts()
{
  with_test_server!(|mut client : OllamaClient, model : String| async move {
    // Three unrelated sentences are concatenated with spaces into one prompt;
    // the API takes a single string per request.
    let combined_prompt =
    [
      "The quick brown fox jumps over the lazy dog",
      "Machine learning is a subset of artificial intelligence",
      "Rust is a systems programming language",
    ]
    .join(" ");
    let request = EmbeddingsRequest
    {
      model : model.clone(),
      prompt : combined_prompt,
      options : None,
    };
    let embeddings = client.embeddings(request).await
    .expect("Embeddings API call should succeed - test server is running");
    assert!(!embeddings.embedding.is_empty(), "Embeddings should not be empty");
    // The L2 norm of a usable embedding must be strictly positive.
    let sum_of_squares : f64 = embeddings.embedding.iter().fold(0.0, |acc, x| acc + x * x);
    let magnitude = sum_of_squares.sqrt();
    assert!(magnitude > 0.0, "Embedding magnitude should be positive");
    println!( "✓ Multiple prompts embeddings generation successful" );
  });
}
#[ tokio::test ]
async fn test_embeddings_empty_prompt_error()
{
  with_test_server!(|mut client : OllamaClient, model : String| async move {
    // Boundary case : an empty prompt may either be answered with some
    // embedding or rejected by the server — both outcomes are acceptable.
    let request = EmbeddingsRequest
    {
      model,
      prompt : String::new(),
      options : None,
    };
    match client.embeddings(request).await
    {
      Err(error) =>
      {
        // If rejected, the error text must identify the cause.
        let error_str = format!( "{error}" );
        assert!(
          error_str.contains("empty") || error_str.contains("invalid") || error_str.contains("API error"),
          "Error should mention empty, invalid, or API error : {error_str}"
        );
        println!( "✓ Empty prompt error handling : {error_str}" );
      },
      Ok(embeddings) =>
      {
        println!( "✓ Empty prompt handled (got {} dimensions)", embeddings.embedding.len() );
      },
    }
    println!( "✓ Empty prompt error handling successful" );
  });
}
#[ tokio::test ]
async fn test_embeddings_network_error()
{
  // Point the client at a host that can never be reached. The `.test` TLD is
  // reserved (RFC 2606) and never resolves. Port 9999 keeps the URL valid :
  // TCP ports are u16 (max 65535), so the original ":99999" produced an
  // invalid URL and could fail on URL parsing rather than exercising the
  // intended network-failure path.
  let mut client = OllamaClient::new( "http://unreachable.test:9999".to_string(), OllamaClient::recommended_timeout_fast() )
  .with_timeout( Duration::from_millis( 100 ) );
  let request = EmbeddingsRequest
  {
    model : "test-model".to_string(),
    prompt : "Test prompt".to_string(),
    options : None,
  };
  let result = client.embeddings( request ).await;
  assert!( result.is_err() );
  let error = result.unwrap_err();
  let error_str = format!( "{error}" );
  // The client is expected to classify DNS/connect failures as network errors.
  assert!( error_str.contains( "Network error" ) );
  println!( "✓ Network error handling successful" );
}
#[ tokio::test ]
async fn test_embeddings_invalid_model()
{
  with_test_server!(|mut client : OllamaClient, _model : String| async move {
    // Deliberately ignore the provisioned model and request one that cannot
    // exist on the test server.
    let request = EmbeddingsRequest
    {
      model : "non-existent-model".to_string(),
      prompt : "Test prompt".to_string(),
      options : None,
    };
    let result = client.embeddings(request).await;
    assert!(result.is_err(), "Invalid model should result in error");
    let error = result.unwrap_err();
    let error_str = format!( "{error}" );
    assert!(
      error_str.contains("API error") || error_str.contains("model not found"),
      "Error should mention API error or model not found : {error_str}"
    );
    println!( "✓ Invalid model error handling successful" );
  });
}
#[ tokio::test ]
async fn test_embeddings_with_options()
{
  with_test_server!(|mut client : OllamaClient, model : String| async move {
    use std::collections::HashMap;
    // Generation options travel as free-form JSON values keyed by name.
    let options = HashMap::from(
    [
      ( "temperature".to_string(), serde_json::Value::from(0.1) ),
      ( "top_p".to_string(), serde_json::Value::from(0.9) ),
    ]);
    let request = EmbeddingsRequest
    {
      model,
      prompt : "Test prompt with options".to_string(),
      options : Some(options),
    };
    let result = client.embeddings(request).await;
    assert!(result.is_ok(), "Failed to get embeddings with options : {result:?}");
    let embeddings = result.unwrap();
    assert!(!embeddings.embedding.is_empty(), "Embeddings with options should not be empty");
    println!( "✓ Embeddings with options successful" );
  });
}
#[ tokio::test ]
async fn test_embeddings_long_prompt()
{
  with_test_server!(|mut client : OllamaClient, model : String| async move {
    // Repeated filler text (~5.7 KB) stresses the accepted input length.
    let request = EmbeddingsRequest
    {
      model,
      prompt : "Lorem ipsum dolor sit amet, consectetur adipiscing elit. ".repeat(100),
      options : None,
    };
    let embeddings = client.embeddings(request).await
    .expect("Embeddings API call should succeed for long prompt - test server is running");
    assert!(!embeddings.embedding.is_empty(), "Embeddings for long prompt should not be empty");
    println!( "✓ Long prompt embeddings generation successful" );
  });
}
#[ tokio::test ]
async fn test_embeddings_special_characters()
{
  with_test_server!(|mut client : OllamaClient, model : String| async move {
    // Mixed scripts (CJK, Cyrillic, Arabic) plus an emoji : exercises UTF-8
    // handling through the request/response round trip.
    let request = EmbeddingsRequest
    {
      model,
      prompt : "Hello! 你好 🌍 Привет مرحبا こんにちは".to_string(),
      options : None,
    };
    let result = client.embeddings(request).await;
    assert!(result.is_ok(), "Failed to get embeddings for special characters : {result:?}");
    let embeddings = result.unwrap();
    assert!(!embeddings.embedding.is_empty(), "Embeddings for special characters should not be empty");
    println!( "✓ Special characters embeddings generation successful" );
  });
}
#[ tokio::test ]
async fn test_embeddings_consistency()
{
  with_test_server!(|mut client : OllamaClient, model : String| async move {
    let prompt = "The same prompt should produce consistent embeddings";
    // Build two identical requests via a closure so the server is queried
    // twice with byte-identical payloads.
    let build_request = || EmbeddingsRequest
    {
      model : model.clone(),
      prompt : prompt.to_string(),
      options : None,
    };
    let embeddings1 = client.embeddings(build_request()).await
    .expect("First embeddings API call should succeed - test server is running");
    let embeddings2 = client.embeddings(build_request()).await
    .expect("Second embeddings API call should succeed - test server is running");
    assert_eq!(embeddings1.embedding.len(), embeddings2.embedding.len(),
    "Embeddings should have same dimensions");
    // Accumulate dot product and both squared norms in one pass; the
    // summation order matches element order, as in separate iterator sums.
    let mut dot = 0.0_f64;
    let mut sq1 = 0.0_f64;
    let mut sq2 = 0.0_f64;
    for (a, b) in embeddings1.embedding.iter().zip(embeddings2.embedding.iter())
    {
      dot += a * b;
      sq1 += a * a;
      sq2 += b * b;
    }
    // Cosine similarity near 1.0 means the server is stable for equal inputs.
    let cosine_similarity = dot / (sq1.sqrt() * sq2.sqrt());
    assert!(cosine_similarity > 0.95,
    "Cosine similarity should be > 0.95 for identical prompts, got : {cosine_similarity}");
    println!( "✓ Embeddings consistency test successful (similarity : {cosine_similarity:.4})" );
  });
}
#[ tokio::test ]
async fn test_embeddings_authentication()
{
  // With `secret_management` enabled the client can pull credentials from a
  // SecretStore; without it this test is a documented no-op.
  #[ cfg( feature = "secret_management" ) ]
  {
    use api_ollama::SecretStore;
    with_test_server!(|client : OllamaClient, model : String| async move {
      let mut store = SecretStore::new();
      store.set("api_key", "test-api-key").expect("Failed to store test API key");
      // Wrap the plain client with the populated store.
      let mut authenticated_client = client.with_secret_store(store);
      let request = EmbeddingsRequest
      {
        model,
        prompt : "Test prompt with authentication".to_string(),
        options : None,
      };
      let embeddings = authenticated_client.embeddings(request).await
      .expect("Embeddings API call with authentication should succeed - test server is running");
      assert!(!embeddings.embedding.is_empty(), "Authenticated embeddings should not be empty");
      println!( "✓ Embeddings with authentication successful" );
    });
  }
  #[ cfg( not( feature = "secret_management" ) ) ]
  {
    println!( "⚠ Skipping authentication test - secret_management feature not enabled" );
  }
}