use std::sync::Arc;
use call_agent::chat::{client::{ModelConfig, OpenAIClient}, function::Tool, prompt::{Message, MessageContext}};
use serde_json::Value;
/// Unit-struct tool that reports the length of a piece of text.
///
/// Carries no state, so `Debug`, `Clone`, `Copy`, and `Default` are derived
/// for free — they make the type easier to use generically and in tests.
#[derive(Debug, Clone, Copy, Default)]
pub struct TextLengthTool;

impl TextLengthTool {
    /// Creates a new `TextLengthTool`.
    ///
    /// Equivalent to `TextLengthTool::default()`; kept for a conventional
    /// constructor-style call site.
    pub fn new() -> Self {
        Self
    }
}
impl Tool for TextLengthTool {
    /// Tool identifier exposed to the model.
    fn def_name(&self) -> &str {
        "text_length_tool"
    }

    /// Short description shown to the model when it decides whether to call us.
    fn def_description(&self) -> &str {
        "Returns the length of the input text."
    }

    /// JSON Schema for the tool's arguments: one required string field `text`.
    fn def_parameters(&self) -> Value {
        serde_json::json!({
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "Input text to calculate its length"
                }
            },
            "required": ["text"]
        })
    }

    /// Executes the tool: returns `{"length": <n>}` as a JSON string, where
    /// `<n>` is the number of Unicode scalar values in `text`.
    ///
    /// # Errors
    /// Returns an error string when `args` has no string-valued `"text"` key.
    ///
    /// Fix: the original used `str::len()`, which counts UTF-8 *bytes*, so any
    /// non-ASCII input (e.g. "héllo", CJK text, emoji) reported a misleading
    /// value for a tool whose contract is "length of the input text".
    /// `chars().count()` counts characters instead. The debug `println!` of the
    /// raw arguments was also removed as development leftover.
    fn run(&self, args: Value) -> Result<String, String> {
        // `Value` string-indexing yields `Null` (it does not panic) when the
        // key is absent or `args` is not an object, so `as_str()` safely
        // funnels every bad shape into the same error.
        let text = args["text"]
            .as_str()
            .ok_or_else(|| "Missing 'text' parameter".to_string())?;
        let length = text.chars().count();
        Ok(serde_json::json!({ "length": length }).to_string())
    }
}
#[tokio::main]
async fn main() {
    // Read the API key from the environment so it never lands in source
    // control; fall back to the placeholder so the example still compiles
    // and runs (and fails loudly at the API) without configuration.
    let api_key = std::env::var("OPENAI_API_KEY").unwrap_or_else(|_| "YOUR_API_KEY".to_string());

    let mut client = OpenAIClient::new(
        "https://api.openai.com/v1/",
        Some(api_key.as_str()),
    );

    // Register the custom tool so the model may invoke it during generation.
    client.def_tool(Arc::new(TextLengthTool::new()));

    let config = ModelConfig {
        model: "gpt-4o-mini".to_string(),
        strict: None,
        max_completion_tokens: Some(1000),
        temperature: Some(0.8),
        top_p: Some(1.0),
        parallel_tool_calls: None,
        presence_penalty: Some(0.0),
        model_name: None,
        reasoning_effort: None,
        web_search_options: None,
    };
    client.set_model_config(&config);

    let mut prompt_stream = client.create_prompt();

    // Simple REPL: read a line, send it as a user message, print the result.
    loop {
        let mut input = String::new();
        let bytes_read = std::io::stdin()
            .read_line(&mut input)
            .expect("Failed to read line");

        // Fix: `read_line` returns Ok(0) at EOF without filling the buffer.
        // The original never checked this, so closing stdin (Ctrl-D or the
        // end of piped input) made the loop spin forever sending empty
        // prompts to the API.
        if bytes_read == 0 {
            break;
        }

        let trimmed = input.trim();
        // Skip blank lines instead of wasting a request on an empty prompt.
        if trimmed.is_empty() {
            continue;
        }

        let prompt = vec![Message::User {
            name: Some("user".to_string()),
            content: vec![MessageContext::Text(trimmed.to_string())],
        }];
        prompt_stream.add(prompt).await;

        // `None, None` — no per-call tool filter and no progress callback;
        // the turbofish only names the (unused) callback's type.
        let result = prompt_stream
            .generate_can_use_tool::<fn(&str, &serde_json::Value)>(None, None)
            .await;
        println!("{:?}", result);

        // Dump the accumulated conversation state for inspection.
        let response = prompt_stream.prompt.clone();
        println!("{:?}", response);
    }
}