#![ cfg( all( feature = "streaming", feature = "integration", feature = "integration_tests" ) ) ]
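//! Integration tests for streaming chat; they require a reachable Ollama server
//! and are gated behind the `streaming`, `integration`, and `integration_tests` features.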
mod server_helpers;
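// `server_helpers` is expected to provide the `with_test_server!` macro, which
// hands the test a connected `OllamaClient` and the name of an available model.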
use api_ollama::{
  OllamaClient,
  ChatMessage,
  MessageRole,
  ChatRequest,
};
use core::time::Duration;
use futures_util::StreamExt;
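
/// Happy-path streaming: the server should emit incremental chat chunks and
/// finish with a response whose `done` flag is set.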
#[ tokio::test ]
async fn test_streaming_chat_basic()
{
with_test_server!( |mut client : OllamaClient, model : String| async move
{
  let request = ChatRequest
  {
    model,
    messages : vec!
    [
      ChatMessage
      {
        role : MessageRole::User,
        content : "Count from 1 to 3, one number per response.".to_string(),
        images : None,
        #[ cfg( feature = "tool_calling" ) ]
        tool_calls : None,
      }
    ],
    stream : None,
    options : None,
    #[ cfg( feature = "tool_calling" ) ]
    tools : None,
    #[ cfg( feature = "tool_calling" ) ]
    tool_messages : None,
  };

  let mut stream = client.chat_stream( request ).await
    .expect( "Failed to create chat stream - Ollama server must be available for integration tests" );

  let mut responses = Vec::new();
  let mut response_count = 0;
  // Safety limit so a misbehaving server cannot stall the test indefinitely.
  let max_responses = 20;

  while let Some( response_result ) = stream.next().await
  {
    // Count every chunk exactly once, whether or not it carries content.
    response_count += 1;
    assert!( response_result.is_ok(), "Stream response error: {response_result:?}" );
    let response = response_result.unwrap();
    responses.push( response.clone() );
    if !response.message.content.is_empty()
    {
      println!( "Stream chunk {response_count}: '{}'", response.message.content );
    }
    if response.done
    {
      println!( "Streaming completed after {response_count} chunks" );
      break;
    }
    if response_count >= max_responses
    {
      println!( "Streaming stopped at safety limit of {max_responses} responses" );
      break;
    }
  }

  assert!( !responses.is_empty(), "No streaming responses received" );
  println!( "Streaming test completed with {} responses", responses.len() );
});
}
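
/// Streaming against an unreachable host should fail fast with a network error.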
#[ tokio::test ]
async fn test_streaming_chat_error_handling()
{
// `.test` is a reserved TLD that never resolves; keep the port inside the valid
// u16 range so the failure is a network error rather than a URL parse error.
let mut client = OllamaClient::new( "http://unreachable.test:9999".to_string(), OllamaClient::recommended_timeout_fast() )
  .with_timeout( Duration::from_millis( 100 ) );
let request = ChatRequest
{
  model : "test-model".to_string(),
  messages : vec!
  [
    ChatMessage
    {
      role : MessageRole::User,
      content : "Hello".to_string(),
      images : None,
      #[ cfg( feature = "tool_calling" ) ]
      tool_calls : None,
    }
  ],
  stream : None,
  options : None,
  #[ cfg( feature = "tool_calling" ) ]
  tools : None,
  #[ cfg( feature = "tool_calling" ) ]
  tool_messages : None,
};
let result = client.chat_stream( request ).await;
assert!( result.is_err(), "Expected error for unreachable server" );
if let Err( error ) = result
{
  let error_str = format!( "{error}" );
  assert!( error_str.contains( "Network error" ), "Expected network error, got: {error_str}" );
}
}
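
/// Compile-only smoke test: constructing a client must work whenever the
/// `streaming` feature is enabled.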
#[ cfg( feature = "streaming" ) ]
#[ test ]
fn test_streaming_feature_compilation()
{
  // Pure construction smoke test; no network traffic is performed.
  let client = OllamaClient::new( "http://test.local:11434".to_string(), OllamaClient::recommended_timeout_fast() );
  let _ = client;
}