1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
// src/api/chat.rs
//! This module defines the `Chat` API client, which provides methods
//! for interacting with the `OpenAI` Chat API.
//!
//! For more details, refer to the [`OpenAI` Chat API documentation](https://platform.openai.com/docs/api-reference/chat).
/// Define a private namespace for all its items.
mod private
{
// Use crate root for base access
use crate::
{
client ::Client,
error ::Result,
environment ::{ OpenaiEnvironment, EnvironmentInterface },
};
use crate::components::chat_shared::
{
ChatCompletionRequest,
CreateChatCompletionResponse,
ChatCompletionStreamResponse,
};
// External crates
use tokio::sync::mpsc;
/// The client for the `OpenAI` Chat API.
///
/// Borrows the core [`Client`] for the lifetime `'client`; construct it via
/// `client.chat()` rather than directly. `Clone` is cheap — it copies the reference.
#[ derive( Debug, Clone ) ]
pub struct Chat< 'client, E >
where
E : OpenaiEnvironment + EnvironmentInterface + Send + Sync + 'static,
{
// Shared reference to the underlying HTTP client that performs the actual requests.
client : &'client Client< E >,
}
impl< 'client, E > Chat< 'client, E >
where
  E : OpenaiEnvironment + EnvironmentInterface + Send + Sync + 'static,
{
  /// Creates a new `Chat` client.
  ///
  /// # Arguments
  /// - `client`: The core `OpenAI` `Client` to use for requests.
  #[ inline ]
  pub(crate) fn new( client : &'client Client< E > ) -> Self
  {
    Self { client }
  }

  /// Validates a chat completion request before it is sent.
  ///
  /// Shared by [`Self::create`] and [`Self::create_stream`] so both endpoints
  /// reject invalid payloads identically. Compiled only when the
  /// `input_validation` feature is enabled; otherwise the call sites are
  /// compiled out as well and no validation occurs.
  ///
  /// # Errors
  /// Returns `OpenAIError::InvalidArgument` listing every validation failure,
  /// joined with `"; "`.
  #[ cfg( feature = "input_validation" ) ]
  fn validate_request( request : &ChatCompletionRequest ) -> Result< () >
  {
    use crate::input_validation::Validate;
    if let Err( validation_errors ) = request.validate()
    {
      // Collect all individual failures into one human-readable message.
      let error_messages : Vec< String > = validation_errors
      .iter()
      .map( | e | format!( "{e}" ) )
      .collect();
      return Err( error_tools::Error::from( crate::error::OpenAIError::InvalidArgument( format!( "Request validation failed : {}", error_messages.join( "; " ) ) ) ) );
    }
    Ok( () )
  }

  /// Creates a chat completion.
  ///
  /// # Arguments
  /// - `request`: The request body for creating a chat completion.
  ///
  /// # Example
  ///
  /// ```no_run
  /// use api_openai::{ Client, environment::{ OpenaiEnvironment, EnvironmentInterface }, components::chat_shared::ChatCompletionRequest, ClientApiAccessors };
  ///
  /// # async fn example(client : Client< impl OpenaiEnvironment + EnvironmentInterface + Send + Sync + 'static >) -> Result<(), Box< dyn core::error::Error > >
  /// # {
  /// let request = ChatCompletionRequest::former()
  /// .model("gpt-4".to_string())
  /// .form();
  ///
  /// let response = client.chat().create(request).await?;
  /// println!("Response : {:?}", response);
  /// # Ok(())
  /// # }
  /// ```
  ///
  /// # Errors
  /// Returns `OpenAIError` if the request fails, or `OpenAIError::InvalidArgument`
  /// when the `input_validation` feature is enabled and the request is invalid.
  #[ inline ]
  pub async fn create( &self, request : ChatCompletionRequest ) -> Result< CreateChatCompletionResponse >
  {
    // Reject invalid payloads locally before spending a network round-trip.
    #[ cfg( feature = "input_validation" ) ]
    Self::validate_request( &request )?;
    self.client.post( "chat/completions", &request ).await
  }

  /// Creates a chat completion and streams the response.
  ///
  /// # Arguments
  /// - `request`: The request body for creating a chat completion.
  ///
  /// # Errors
  /// Returns `OpenAIError` if the request fails, or `OpenAIError::InvalidArgument`
  /// when the `input_validation` feature is enabled and the request is invalid.
  #[ inline ]
  pub async fn create_stream( &self, request : ChatCompletionRequest ) -> Result< mpsc::Receiver< Result< ChatCompletionStreamResponse > > >
  {
    // Same pre-flight validation as `create`, kept in one helper so the two
    // endpoints cannot drift apart.
    #[ cfg( feature = "input_validation" ) ]
    Self::validate_request( &request )?;
    self.client.post_stream( "chat/completions", &request ).await
  }
}
} // end mod private
// Generate the module's public interface: re-export items from `private`
// according to the crate's exposure conventions (macro provided by the crate root).
crate ::mod_interface!
{
// Expose all structs defined in this module
exposed use
{
Chat,
};
}