// api_xai/sync_api.rs
1mod private
2{
3 //! Synchronous (blocking) API wrappers.
4 //!
5 //! Provides blocking wrappers around the async XAI API client using `tokio::runtime::Runtime`.
6 //!
7 //! # ⚠️ Design Warning
8 //!
9 //! **This contradicts Rust async-first design.** Use cases : legacy integration, simple scripts, learning.
10 //! Not recommended due to : performance overhead, thread blocking, poor composability with async code.
11 //!
12 //! ## Recommended Alternative
13 //!
14 //! Create application-level runtime and use `runtime.block_on()` for async calls:
15 //!
16 //! ```no_run
17 //! use api_xai::{ Client, Secret, XaiEnvironmentImpl, ChatCompletionRequest, Message, ClientApiAccessors };
18 //! use tokio::runtime::Runtime;
19 //!
20 //! # fn example() -> Result< (), Box< dyn std::error::Error > > {
21 //! let rt = Runtime::new()?;
22 //! let secret = Secret::new( "xai-key".to_string() )?;
23 //! let env = XaiEnvironmentImpl::new( secret )?;
24 //! let client = Client::build( env )?;
25 //! let request = ChatCompletionRequest::former()
26 //! .model( "grok-2-1212".to_string() )
27 //! .messages( vec![ Message::user( "Hello!" ) ] )
28 //! .form();
29 //! let response = rt.block_on( client.chat().create( request ) )?;
30 //! # Ok( () )
31 //! # }
32 //! ```
33
34 use crate::{ ChatCompletionRequest, ChatCompletionResponse, Client, XaiEnvironment, ClientApiAccessors };
35 use crate::error::Result;
36 #[ cfg( feature = "streaming" ) ]
37 use crate::ChatCompletionChunk;
38 #[ cfg( feature = "streaming" ) ]
39 use futures_core::Stream;
40 #[ cfg( feature = "streaming" ) ]
41 use std::pin::Pin;
42
43 #[ cfg( feature = "sync_api" ) ]
44 use tokio::runtime::Runtime;
45
46 /// A synchronous (blocking) wrapper around the async XAI client.
47 ///
48 /// **⚠️ WARNING**: This contradicts Rust async-first design principles.
49 /// Use the async `Client` instead when possible.
50 ///
51 /// # Performance Note
52 ///
53 /// Each `SyncClient` owns a `tokio::runtime::Runtime`, which has
54 /// non-trivial overhead. Do not create many `SyncClient` instances.
55 ///
56 /// # Examples
57 ///
58 /// ```no_run
59 /// # #[ cfg( feature = "sync_api") ]
60 /// # {
61 /// use api_xai::{ SyncClient, Client, Secret, XaiEnvironmentImpl, ChatCompletionRequest, Message };
62 ///
63 /// # fn example() -> Result< (), Box< dyn std::error::Error > > {
64 /// let secret = Secret::new( "xai-key".to_string() )?;
65 /// let env = XaiEnvironmentImpl::new( secret )?;
66 /// let client = Client::build( env )?;
67 ///
68 /// // Wrap in sync client
69 /// let sync_client = SyncClient::new( client )?;
70 ///
71 /// let request = ChatCompletionRequest::former()
72 /// .model( "grok-2-1212".to_string() )
73 /// .messages( vec![ Message::user( "Hello!" ) ] )
74 /// .form();
75 ///
76 /// // Blocking call
77 /// let response = sync_client.create( request )?;
78 /// println!( "Response : {:?}", response.choices[ 0 ].message.content );
79 /// # Ok( () )
80 /// # }
81 /// # }
82 /// ```
  #[ cfg( feature = "sync_api" ) ]
  #[ derive( Debug ) ]
  pub struct SyncClient< E >
  where
    E : XaiEnvironment,
  {
    // Dedicated tokio runtime that drives every blocking call; owning it here
    // is why each `SyncClient` carries non-trivial overhead (see type docs).
    runtime : Runtime,
    // The wrapped async client all calls delegate to.
    client : Client< E >,
  }
92
93 #[ cfg( feature = "sync_api" ) ]
94 impl< E > SyncClient< E >
95 where
96 E : XaiEnvironment,
97 {
98 /// Creates a new synchronous client.
99 ///
100 /// # Arguments
101 ///
102 /// * `client` - The async client to wrap
103 ///
104 /// # Errors
105 ///
106 /// Returns error if the tokio runtime cannot be created.
107 ///
108 /// # Examples
109 ///
110 /// ```no_run
111 /// # #[ cfg( feature = "sync_api") ]
112 /// # {
113 /// use api_xai::{ SyncClient, Client, Secret, XaiEnvironmentImpl };
114 ///
115 /// # fn example() -> Result< (), Box< dyn std::error::Error > > {
116 /// let secret = Secret::new( "xai-key".to_string() )?;
117 /// let env = XaiEnvironmentImpl::new( secret )?;
118 /// let client = Client::build( env )?;
119 ///
120 /// let sync_client = SyncClient::new( client )?;
121 /// # Ok( () )
122 /// # }
123 /// # }
124 /// ```
125 pub fn new( client : Client< E > ) -> Result< Self >
126 {
127 let runtime = Runtime::new()
128 .map_err( | e | crate::error::XaiError::ApiError( format!( "Runtime error : {e}" ) ) )?;
129
130 Ok
131 (
132 Self
133 {
134 runtime,
135 client,
136 }
137 )
138 }
139
140 /// Creates a chat completion request (blocking).
141 ///
142 /// Blocks the current thread until the API request completes.
143 ///
144 /// # Arguments
145 ///
146 /// * `request` - The chat completion request
147 ///
148 /// # Returns
149 ///
150 /// The chat completion response.
151 ///
152 /// # Errors
153 ///
154 /// Returns errors from the underlying API client.
155 ///
156 /// # Examples
157 ///
158 /// ```no_run
159 /// # #[ cfg( feature = "sync_api") ]
160 /// # {
161 /// use api_xai::{ SyncClient, Client, Secret, XaiEnvironmentImpl, ChatCompletionRequest, Message };
162 ///
163 /// # fn example() -> Result< (), Box< dyn std::error::Error > > {
164 /// let secret = Secret::new( "xai-key".to_string() )?;
165 /// let env = XaiEnvironmentImpl::new( secret )?;
166 /// let client = Client::build( env )?;
167 /// let sync_client = SyncClient::new( client )?;
168 ///
169 /// let request = ChatCompletionRequest::former()
170 /// .model( "grok-2-1212".to_string() )
171 /// .messages( vec![ Message::user( "Hello!" ) ] )
172 /// .form();
173 ///
174 /// let response = sync_client.create( request )?;
175 /// # Ok( () )
176 /// # }
177 /// # }
178 /// ```
179 pub fn create( &self, request : ChatCompletionRequest ) -> Result< ChatCompletionResponse >
180 {
181 self.runtime.block_on( self.client.chat().create( request ) )
182 }
183
184 /// Creates a streaming chat completion request (blocking iterator).
185 ///
186 /// Returns a blocking iterator over streaming chunks.
187 ///
188 /// # Arguments
189 ///
190 /// * `request` - The chat completion request
191 ///
192 /// # Returns
193 ///
194 /// A blocking iterator that yields `ChatCompletionChunk` items.
195 ///
196 /// # Errors
197 ///
198 /// Returns errors from the underlying API client.
199 ///
200 /// # Examples
201 ///
202 /// ```no_run
203 /// # #[ cfg( all( feature = "sync_api", feature = "streaming" ) ) ]
204 /// # {
205 /// use api_xai::{ SyncClient, Client, Secret, XaiEnvironmentImpl, ChatCompletionRequest, Message };
206 ///
207 /// # fn example() -> Result< (), Box< dyn std::error::Error > > {
208 /// let secret = Secret::new( "xai-key".to_string() )?;
209 /// let env = XaiEnvironmentImpl::new( secret )?;
210 /// let client = Client::build( env )?;
211 /// let sync_client = SyncClient::new( client )?;
212 ///
213 /// let request = ChatCompletionRequest::former()
214 /// .model( "grok-2-1212".to_string() )
215 /// .messages( vec![ Message::user( "Hello!" ) ] )
216 /// .form();
217 ///
218 /// let mut stream = sync_client.create_stream( request )?;
219 /// for chunk in stream
220 /// {
221 /// let chunk = chunk?;
222 /// if let Some( choice ) = chunk.choices.first()
223 /// {
224 /// if let Some( ref content ) = choice.delta.content
225 /// {
226 /// print!( "{}", content );
227 /// }
228 /// }
229 /// }
230 /// # Ok( () )
231 /// # }
232 /// # }
233 /// ```
234 #[ cfg( feature = "streaming" ) ]
235 pub fn create_stream( &self, request : ChatCompletionRequest ) -> Result< SyncStreamIterator< E > >
236 {
237 let stream = self.runtime.block_on( self.client.chat().create_stream( request ) )?;
238
239 Ok
240 (
241 SyncStreamIterator
242 {
243 stream,
244 runtime : Runtime::new()
245 .map_err( | e | crate::error::XaiError::ApiError( format!( "Runtime error : {e}" ) ) )?,
246 _phantom : core::marker::PhantomData,
247 }
248 )
249 }
250
251 /// Lists available models (blocking).
252 ///
253 /// # Returns
254 ///
255 /// List models response.
256 ///
257 /// # Errors
258 ///
259 /// Returns errors from the underlying API client.
260 ///
261 /// # Examples
262 ///
263 /// ```no_run
264 /// # #[ cfg( feature = "sync_api") ]
265 /// # {
266 /// use api_xai::{ SyncClient, Client, Secret, XaiEnvironmentImpl };
267 ///
268 /// # fn example() -> Result< (), Box< dyn std::error::Error > > {
269 /// let secret = Secret::new( "xai-key".to_string() )?;
270 /// let env = XaiEnvironmentImpl::new( secret )?;
271 /// let client = Client::build( env )?;
272 /// let sync_client = SyncClient::new( client )?;
273 ///
274 /// let response = sync_client.list_models()?;
275 /// for model in response.data
276 /// {
277 /// println!( "Model : {}", model.id );
278 /// }
279 /// # Ok( () )
280 /// # }
281 /// # }
282 /// ```
283 pub fn list_models( &self ) -> Result< crate::components::ListModelsResponse >
284 {
285 self.runtime.block_on( self.client.models().list() )
286 }
287
288 /// Gets model information (blocking).
289 ///
290 /// # Arguments
291 ///
292 /// * `model_id` - The model ID to retrieve
293 ///
294 /// # Returns
295 ///
296 /// Model information.
297 ///
298 /// # Errors
299 ///
300 /// Returns errors from the underlying API client.
301 ///
302 /// # Examples
303 ///
304 /// ```no_run
305 /// # #[ cfg( feature = "sync_api") ]
306 /// # {
307 /// use api_xai::{ SyncClient, Client, Secret, XaiEnvironmentImpl };
308 ///
309 /// # fn example() -> Result< (), Box< dyn std::error::Error > > {
310 /// let secret = Secret::new( "xai-key".to_string() )?;
311 /// let env = XaiEnvironmentImpl::new( secret )?;
312 /// let client = Client::build( env )?;
313 /// let sync_client = SyncClient::new( client )?;
314 ///
315 /// let model = sync_client.get_model( "grok-2-1212" )?;
316 /// println!( "Model : {}", model.id );
317 /// # Ok( () )
318 /// # }
319 /// # }
320 /// ```
321 pub fn get_model( &self, model_id : &str ) -> Result< crate::components::Model >
322 {
323 self.runtime.block_on( self.client.models().get( model_id ) )
324 }
325 }
326
327 /// Synchronous iterator wrapper around async streaming.
328 ///
329 /// Provides a blocking iterator over `ChatCompletionChunk` items
330 /// by wrapping the async stream in a dedicated tokio runtime.
331 #[ cfg( feature = "streaming" ) ]
332 pub struct SyncStreamIterator< E >
333 where
334 E : XaiEnvironment + Send + Sync + 'static,
335 {
336 stream : Pin< Box< dyn Stream< Item = Result< ChatCompletionChunk > > + Send + 'static > >,
337 runtime : Runtime,
338 _phantom : core::marker::PhantomData< E >,
339 }
340
341 #[ cfg( feature = "streaming" ) ]
342 impl< E > core::fmt::Debug for SyncStreamIterator< E >
343 where
344 E : XaiEnvironment + Send + Sync + 'static,
345 {
346 fn fmt( &self, f : &mut core::fmt::Formatter< '_ > ) -> core::fmt::Result
347 {
348 f.debug_struct( "SyncStreamIterator" )
349 .field( "runtime", &self.runtime )
350 .finish_non_exhaustive()
351 }
352 }
353
354 #[ cfg( feature = "streaming" ) ]
355 impl< E > Iterator for SyncStreamIterator< E >
356 where
357 E : XaiEnvironment + Send + Sync + 'static,
358 {
359 type Item = Result< ChatCompletionChunk >;
360
361 fn next( &mut self ) -> Option< Self::Item >
362 {
363 use futures_util::StreamExt;
364
365 self.runtime.block_on( self.stream.next() )
366 }
367 }
368
  /// Synchronous wrapper for `count_tokens` (requires `count_tokens` feature).
  ///
  /// Counts tokens in a text string for a specific model.
  ///
  /// Note: `crate::count_tokens` is itself a plain (non-async) function, so this
  /// wrapper delegates directly without a runtime; it exists for naming symmetry
  /// with the other `sync_*` helpers.
  ///
  /// # Arguments
  ///
  /// * `text` - The text to count tokens for
  /// * `model` - The model name
  ///
  /// # Returns
  ///
  /// Number of tokens in the text.
  ///
  /// # Errors
  ///
  /// Returns `XaiError::InvalidModel` if the model is not supported.
  ///
  /// # Examples
  ///
  /// ```no_run
  /// # #[ cfg( all(feature = "sync_api", feature = "count_tokens")) ]
  /// # {
  /// use api_xai::sync_count_tokens;
  ///
  /// # fn example() -> Result< (), Box< dyn std::error::Error > > {
  /// let count = sync_count_tokens( "Hello, world!", "grok-2-1212" )?;
  /// println!( "Token count : {}", count );
  /// # Ok( () )
  /// # }
  /// # }
  /// ```
  #[ cfg( all( feature = "sync_api", feature = "count_tokens" ) ) ]
  pub fn sync_count_tokens( text : &str, model : &str ) -> Result< usize >
  {
    crate::count_tokens( text, model )
  }
405
  /// Synchronous wrapper for `count_tokens_for_request` (requires `count_tokens` feature).
  ///
  /// Counts tokens in a chat completion request.
  ///
  /// Note: `crate::count_tokens_for_request` is itself a plain (non-async)
  /// function, so this wrapper delegates directly without a runtime; it exists
  /// for naming symmetry with the other `sync_*` helpers.
  ///
  /// # Arguments
  ///
  /// * `request` - The chat completion request
  ///
  /// # Returns
  ///
  /// Estimated total token count for the request.
  ///
  /// # Errors
  ///
  /// Returns `XaiError::InvalidModel` if the model is not supported.
  ///
  /// # Examples
  ///
  /// ```no_run
  /// # #[ cfg( all(feature = "sync_api", feature = "count_tokens")) ]
  /// # {
  /// use api_xai::{ sync_count_tokens_for_request, ChatCompletionRequest, Message };
  ///
  /// # fn example() -> Result< (), Box< dyn std::error::Error > > {
  /// let request = ChatCompletionRequest::former()
  /// .model( "grok-2-1212".to_string() )
  /// .messages( vec![ Message::user( "Hello!" ) ] )
  /// .form();
  ///
  /// let count = sync_count_tokens_for_request( &request )?;
  /// println!( "Total request tokens : {}", count );
  /// # Ok( () )
  /// # }
  /// # }
  /// ```
  #[ cfg( all( feature = "sync_api", feature = "count_tokens" ) ) ]
  pub fn sync_count_tokens_for_request( request : &ChatCompletionRequest ) -> Result< usize >
  {
    crate::count_tokens_for_request( request )
  }
446
447 /// Synchronous wrapper for `validate_request_size` (requires `count_tokens` feature).
448 ///
449 /// Validates that a request fits within the model's context window.
450 ///
451 /// # Arguments
452 ///
453 /// * `request` - The chat completion request
454 /// * `max_tokens` - The model's maximum context window size
455 ///
456 /// # Returns
457 ///
458 /// `Ok(())` if the request fits, error otherwise.
459 ///
460 /// # Errors
461 ///
462 /// Returns `XaiError::InvalidParameter` if the request exceeds the context window.
463 ///
464 /// # Examples
465 ///
466 /// ```no_run
467 /// # #[ cfg( all(feature = "sync_api", feature = "count_tokens")) ]
468 /// # {
469 /// use api_xai::{ sync_validate_request_size, ChatCompletionRequest, Message };
470 ///
471 /// # fn example() -> Result< (), Box< dyn std::error::Error > > {
472 /// let request = ChatCompletionRequest::former()
473 /// .model( "grok-2-1212".to_string() )
474 /// .messages( vec![ Message::user( "Hello!" ) ] )
475 /// .form();
476 ///
477 /// // Grok-3 has 131K context window
478 /// sync_validate_request_size( &request, 131072 )?;
479 /// # Ok( () )
480 /// # }
481 /// # }
482 /// ```
483 #[ cfg( all( feature = "sync_api", feature = "count_tokens" ) ) ]
484 pub fn sync_validate_request_size
485 (
486 request : &ChatCompletionRequest,
487 max_tokens : usize
488 )
489 -> Result< () >
490 {
491 crate::validate_request_size( request, max_tokens )
492 }
493
  /// Synchronous wrapper for `cached_create` (requires `caching` feature).
  ///
  /// **Note**: This is NOT recommended. Caching works better with async
  /// because the cache can be shared across concurrent requests.
  ///
  /// For sync usage, prefer using `SyncClient` with application-level caching.
  #[ cfg( all( feature = "sync_api", feature = "caching" ) ) ]
  #[ derive( Debug ) ]
  pub struct SyncCachedClient< E >
  where
    E : XaiEnvironment,
  {
    // Dedicated tokio runtime that drives the blocking calls.
    runtime : Runtime,
    // Caching wrapper around the async client; owns the response cache.
    cached_client : crate::CachedClient< E >,
  }
509
510 #[ cfg( all( feature = "sync_api", feature = "caching" ) ) ]
511 impl< E > SyncCachedClient< E >
512 where
513 E : XaiEnvironment,
514 {
515 /// Creates a new synchronous cached client.
516 ///
517 /// # Arguments
518 ///
519 /// * `client` - The async client to wrap
520 /// * `capacity` - Maximum number of responses to cache
521 ///
522 /// # Errors
523 ///
524 /// Returns error if the tokio runtime cannot be created.
525 pub fn new( client : Client< E >, capacity : usize ) -> Result< Self >
526 {
527 let runtime = Runtime::new()
528 .map_err( | e | crate::error::XaiError::ApiError( format!( "Runtime error : {e}" ) ) )?;
529
530 let cached_client = crate::CachedClient::new( client, capacity );
531
532 Ok
533 (
534 Self
535 {
536 runtime,
537 cached_client,
538 }
539 )
540 }
541
542 /// Creates a chat completion request with caching (blocking).
543 ///
544 /// # Arguments
545 ///
546 /// * `request` - The chat completion request
547 ///
548 /// # Returns
549 ///
550 /// The chat completion response (cached or fresh).
551 ///
552 /// # Errors
553 ///
554 /// Returns errors from the underlying API client, including network errors,
555 /// API errors, authentication failures, and serialization errors.
556 pub fn create( &self, request : ChatCompletionRequest ) -> Result< ChatCompletionResponse >
557 {
558 self.runtime.block_on( self.cached_client.cached_create( request ) )
559 }
560
561 /// Clears all cached responses.
562 pub fn clear( &self )
563 {
564 self.cached_client.clear();
565 }
566
567 /// Returns the number of cached responses.
568 pub fn len( &self ) -> usize
569 {
570 self.cached_client.len()
571 }
572
573 /// Returns true if the cache is empty.
574 pub fn is_empty( &self ) -> bool
575 {
576 self.cached_client.is_empty()
577 }
578 }
579}
580
// Re-export the public surface of `private`, gated on the same feature flags
// as the items themselves. `//` comments are stripped before macro expansion,
// so they are safe inside the invocation.
#[ cfg( feature = "sync_api" ) ]
crate::mod_interface!
{
  // Core blocking client.
  exposed use
  {
    SyncClient,
  };

  // Blocking iterator over streaming chunks.
  #[ cfg( all( feature = "sync_api", feature = "streaming" ) ) ]
  exposed use
  {
    SyncStreamIterator,
  };

  // Token-counting convenience wrappers.
  #[ cfg( all( feature = "sync_api", feature = "count_tokens" ) ) ]
  exposed use
  {
    sync_count_tokens,
    sync_count_tokens_for_request,
    sync_validate_request_size,
  };

  // Blocking cached client.
  #[ cfg( all( feature = "sync_api", feature = "caching" ) ) ]
  exposed use
  {
    SyncCachedClient,
  };
}
608}