openai_interface/completions/mod.rs

//! Given a prompt, the model will return one or more predicted completions,
//! and can also return the probabilities of alternative tokens at each position.
//! Unlike the `chat` API, this API does not support multiple rounds of
//! conversation, and it is being deprecated in favor of the `chat` API.
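//!
//! A minimal non-streaming sketch (marked `ignore` since it needs network access
//! and a real key; the crate path, endpoint URL, model name, and API key below
//! are placeholders, and `get_response` comes from the
//! `crate::rest::post::PostNoStream` trait used in the tests below):
//!
//! ```ignore
//! use openai_interface::completions::request::{CompletionRequest, Prompt};
//! use openai_interface::rest::post::PostNoStream;
//!
//! async fn demo() -> Result<(), anyhow::Error> {
//!     let request = CompletionRequest {
//!         model: "some-model".to_string(),
//!         prompt: Prompt::PromptString("Say hello.".to_string()),
//!         max_tokens: Some(64),
//!         stream: false,
//!         ..Default::default()
//!     };
//!     let completion = request
//!         .get_response("https://example.com/v1/completions", "YOUR_API_KEY")
//!         .await?;
//!     println!("{}", completion.choices[0].text);
//!     Ok(())
//! }
//! ```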

pub mod request;
pub mod response;

#[cfg(test)]
mod tests {
    use std::sync::LazyLock;

    use futures_util::StreamExt;

    use crate::rest::post::{PostNoStream, PostStream};

    use super::*;

    const QWEN_MODEL: &str = "qwen-coder-turbo-latest";
    const QWEN_URL: &str = "https://dashscope.aliyuncs.com/compatible-mode/v1/completions";
    // `static` rather than `const`: a `const` item would create a fresh,
    // uninitialized `LazyLock` at every use site instead of sharing one
    // lazily-read key.
    static QWEN_API_KEY: LazyLock<&'static str> =
        LazyLock::new(|| include_str!("../../keys/modelstudio_domestic_key").trim());

    static REQUEST_BODY: LazyLock<request::CompletionRequest> =
        LazyLock::new(|| request::CompletionRequest {
            model: QWEN_MODEL.to_string(),
            // Chinese prompt: asks which work the line
            // "桂棹兮兰桨,击空明兮溯流光" comes from.
            prompt: request::Prompt::PromptString(
                "\"桂棹兮兰桨,击空明兮溯流光\" 出自哪里?".to_string(),
            ),
            max_tokens: Some(500),
            stream: false,
            ..Default::default()
        });

    // Non-streaming request: the full completion text arrives in one response.
    #[tokio::test]
    async fn test_qwen_completion_no_stream() -> Result<(), anyhow::Error> {
        let request_body = &REQUEST_BODY;
        let completion = request_body.get_response(QWEN_URL, *QWEN_API_KEY).await?;
        let text = &completion.choices[0].text;
        println!("Completion no-stream: {}", text);
        Ok(())
    }

    // Streaming request: chunks are printed as they arrive until the stream
    // ends or an error is encountered.
    #[tokio::test]
    async fn test_qwen_completion_stream() -> Result<(), anyhow::Error> {
        let mut request_body = REQUEST_BODY.clone();
        request_body.stream = true;

        let mut stream = request_body
            .get_stream_response(QWEN_URL, *QWEN_API_KEY)
            .await?;

        while let Some(chunk) = stream.next().await {
            match chunk {
                Ok(completion) => {
                    println!(
                        "Completion stream chunk: {}; finish reason: {:?}",
                        completion.choices[0].text, completion.choices[0].finish_reason
                    );
                }
                Err(e) => {
                    eprintln!("Error receiving chunk: {:?}", e);
                    break;
                }
            }
        }

        Ok(())
    }
}