text_request_json/text_request_json.rs

#[cfg(feature = "beta")]
use std::env;

#[cfg(feature = "beta")]
use google_generative_ai_rs::v1::gemini::request::GenerationConfig;

#[cfg(feature = "beta")]
use google_generative_ai_rs::v1::{
    api::Client,
    gemini::{request::Request, Content, Model, Part, Role},
};

/// JSON-based text request using the public API and an API key for authn
///
/// NOTE: Currently only available on the v1beta API.
///
/// To run:
/// ```
/// API_KEY=[YOUR_API_KEY] RUST_LOG=info cargo run --features "beta" --package google-generative-ai-rs --example text_request_json
/// ```
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();

    #[cfg(not(feature = "beta"))]
    {
        log::error!("JSON mode is currently only available with Gemini 1.5 Pro and the 'beta' feature enabled");

        Ok(())
    }

    #[cfg(feature = "beta")]
    {
        // Create the client for the Gemini 1.5 Pro model, authenticating with an API key
        let client = Client::new_from_model(Model::Gemini1_5Pro, env::var("API_KEY").unwrap());

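        // The desired output schema is embedded directly in the prompt text;
        // the request's `response_schema` field below is left as `None`.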
        let prompt = r#"List 5 popular cookie recipes using this JSON schema: 
                        { "type": "object", "properties": { "recipe_name": { "type": "string" }}}"#
            .to_string();

        log::info!("Prompt: {:#?}", prompt);

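        // Build the request: a single user turn carrying the prompt, plus a
        // generation config that asks for a JSON response.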
        let txt_request = Request {
            contents: vec![Content {
                role: Role::User,
                parts: vec![Part {
                    text: Some(prompt),
                    inline_data: None,
                    file_data: None,
                    video_metadata: None,
                }],
            }],
            tools: vec![],
            safety_settings: vec![],
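            // Requesting "application/json" as the response MIME type is what
            // switches the model into JSON-mode output.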
            generation_config: Some(GenerationConfig {
                temperature: None,
                top_p: None,
                top_k: None,
                candidate_count: None,
                max_output_tokens: None,
                stop_sequences: None,
                response_mime_type: Some("application/json".to_string()),
                response_schema: None,
            }),
            system_instruction: None,
        };

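        // Post the request (the `30` is assumed to be the request timeout, in seconds)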
        let response = client.post(30, &txt_request).await?;

        log::info!("{:#?}", response);

        Ok(())
    }
}