openai_rs/endpoints/answer.rs

use std::borrow::Cow;
use std::collections::HashMap;
use hyper::{Body, Request};
use log::trace; // assumed: the trace! macro below comes from the `log` crate
use serde::Serialize;
use crate::endpoints::Model;
use crate::endpoints::request::Endpoint;

/// Given a question, a set of documents, and some examples, the API generates an answer to the
/// question based on the information in the set of documents.
/// This is useful for question-answering applications on sources of truth,
/// like company documentation or a knowledge base.
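///
/// # Example
///
/// A minimal construction sketch: override only the fields a request needs and
/// let everything else fall back to [`Default`]. The import path is an
/// assumption based on this file's location in the crate, and the field values
/// are purely illustrative.
///
/// ```no_run
/// use std::borrow::Cow;
/// // Assumed path; adjust to wherever the crate actually exports `Answer`.
/// use openai_rs::endpoints::answer::Answer;
///
/// let request = Answer {
///     question: Cow::Borrowed("Which puppy is happy?"),
///     examples: vec![[
///         Cow::Borrowed("What is human life expectancy in the United States?"),
///         Cow::Borrowed("78 years."),
///     ]],
///     examples_context: Cow::Borrowed("In 2017, U.S. life expectancy was 78.6 years."),
///     documents: vec![
///         Cow::Borrowed("Puppy A is happy."),
///         Cow::Borrowed("Puppy B is sad."),
///     ],
///     ..Answer::default()
/// };
/// ```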
#[derive(Debug, Clone, Serialize)]
pub struct Answer<'a> {
    /// ID of the engine to use for completion. You can select one of ada, babbage, curie, or davinci.
    pub model: Model,

    /// The question to answer.
    pub question: Cow<'a, str>,

    /// A list of (question, answer) pairs that help steer the model towards the tone
    /// and answer format you'd like.
    pub examples: Vec<[Cow<'a, str>; 2]>,

    /// A text snippet containing the contextual information used to generate the answers
    /// for the examples you provide.
    pub examples_context: Cow<'a, str>,

    /// List of documents from which the answer for the input question should be derived.
    /// If this is an empty list, the question will be answered based on the question-answer examples.
    /// You should specify either documents or a file, but not both.
    pub documents: Vec<Cow<'a, str>>,

    /// The ID of an uploaded file that contains documents to search over.
    /// See upload file for how to upload a file of the desired format and purpose.
    /// You should specify either documents or a file, but not both.
    pub file: Option<Cow<'a, str>>,

    /// ID of the engine to use for Search. You can select one of ada, babbage, curie, or davinci.
    pub search_model: Model,

    /// The maximum number of documents to be ranked by Search when using file.
    /// Setting it to a higher value leads to improved accuracy but with increased latency and cost.
    pub max_rerank: u32,

    /// What sampling temperature to use. Higher values mean the model will take more risks;
    /// a value of 0 (argmax sampling) works better for scenarios with a well-defined answer.
    pub temperature: f32,

    /// Include the log probabilities on the logprobs most likely tokens, as well as the chosen tokens.
    /// For example, if logprobs is 5, the API will return a list of the 5 most likely tokens.
    /// The API will always return the logprob of the sampled token,
    /// so there may be up to logprobs+1 elements in the response.
    /// The maximum value for logprobs is 5.
    /// If you need more than this, please contact support@openai.com and describe your use case.
    pub logprobs: u32,

    /// The maximum number of tokens allowed for the generated answer.
    pub max_tokens: u32,

    /// Up to 4 sequences where the API will stop generating further tokens.
    /// The returned text will not contain the stop sequence.
    pub stop: Option<Vec<Cow<'a, str>>>,

    /// How many answers to generate for each question.
    pub n: u32,

    /// Modify the likelihood of specified tokens appearing in the completion.
    /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer)
    /// to an associated bias value from -100 to 100. You can use OpenAI's online tokenizer tool
    /// (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically,
    /// the bias is added to the logits generated by the model prior to sampling. The exact effect
    /// will vary per model, but values between -1 and 1 should decrease or increase likelihood
    /// of selection; values like -100 or 100 should result in a ban or exclusive selection
    /// of the relevant token.
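    /// For example, mapping token ID "50256" to -100 bans the <|endoftext|> token:
    /// `logit_bias: HashMap::from([(Cow::Borrowed("50256"), -100)])`.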
    pub logit_bias: HashMap<Cow<'a, str>, i32>,

    /// A special boolean flag for showing metadata. If set to true, each document entry in the
    /// returned JSON will contain a "metadata" field. This flag only takes effect when file is set.
    pub return_metadata: bool,

    /// If set to true, the returned JSON will include a "prompt" field containing the final prompt
    /// that was used to request a completion. This is mainly useful for debugging purposes.
    pub return_prompt: bool,

    /// If an object name is in the list, we provide the full information of the object;
    /// otherwise, we only provide the object ID. Currently we support completion and file objects for expansion.
    pub expand: Vec<Cow<'a, str>>,

    /// A unique identifier representing your end-user,
    /// which will help OpenAI to monitor and detect abuse.
    pub user: Cow<'a, str>
}

/// The defaults track the API's documented defaults where one exists
/// (max_rerank 200, temperature 0, max_tokens 16, n 1).
impl Default for Answer<'_> {
    fn default() -> Self {
        Self {
            model: Model::Ada,
            question: Cow::Borrowed(""),
            examples: Vec::new(),
            examples_context: Cow::Borrowed(""),
            documents: Vec::new(),
            file: None,
            search_model: Model::Ada,
            max_rerank: 200,
            temperature: 0.0,
            logprobs: 0,
            max_tokens: 16,
            stop: None,
            n: 1,
            logit_bias: HashMap::new(),
            return_metadata: false,
            return_prompt: false,
            expand: Vec::new(),
            user: Cow::Borrowed("")
        }
    }
}

impl Endpoint for Answer<'_> {
    const ENDPOINT: &'static str = "https://api.openai.com/v1/answers";

    fn request(&self, auth_token: &str, _engine_id: Option<&str>) -> Request<Body> {
        let serialized = serde_json::to_string(&self)
            .expect("Failed to serialize Answer");
        let endpoint = Self::ENDPOINT.to_owned();
        trace!("endpoint={}, serialized={}", endpoint, serialized);

        super::request::post!(endpoint, auth_token, serialized)
    }
}
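
// A small sketch of the request body this endpoint produces, written as a unit
// test. It only exercises serde serialization of a default `Answer`; the JSON
// emitted for `model`/`search_model` depends on `Model`'s Serialize impl, so
// the assertions stick to plain numeric fields set by the Default impl above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn default_answer_serializes() {
        let answer = Answer::default();
        let json = serde_json::to_string(&answer).expect("Answer should serialize");

        assert!(json.contains("\"max_rerank\":200"));
        assert!(json.contains("\"max_tokens\":16"));
        assert!(json.contains("\"n\":1"));
    }
}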