Function vectorize_string_concurrently

pub async fn vectorize_string_concurrently<C>(
    prompts: Vec<String>,
    vector: &mut Vector<String>,
    client: Client<C>,
    model_parameters: ModelParameters,
) -> Result<(), Error>
where C: Config + Send + Sync + 'static,

Concurrently vectorizes a text string with multiple prompts.

§Arguments

  • prompts - A vector of prompts to process concurrently
  • vector - A mutable reference to the Vector struct containing the text
  • client - The OpenAI API client
  • model_parameters - The ModelParameters specifying the LLM model to use

§Returns

  • Result<(), Error> - Ok(()) on success, or Err(Error) on failure
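§Example

A minimal call sketch, assuming the crate's Vector, ModelParameters, and Error items are in scope; the base URL, API key, and "mistral" model name are placeholders mirroring the repository examples below, not values this crate requires.

use async_openai::{config::OpenAIConfig, Client};

async fn run() -> Result<(), Error> {
    // Wrap the text to vectorize.
    let mut vector: Vector<String> = Vector::from_text("Some text to score.".to_string());

    // Any async-openai client whose config satisfies `Config + Send + Sync + 'static` works.
    let client: Client<OpenAIConfig> = Client::with_config(
        OpenAIConfig::new()
            .with_api_base("http://localhost:11434/v1") // assumed local Ollama endpoint
            .with_api_key("your_api_key"),
    );

    // The prompts are processed concurrently against the same text.
    let prompts = vec![
        "Score the sentiment of the text from 1 to 9. Respond like {'sentiment_score': 7}".to_string(),
        "Rate the formality of the text from 1 to 9. Respond like {'formality_score': 4}".to_string(),
    ];

    let model_parameters = ModelParameters::new("mistral".to_string(), None, None);
    vectorize_string_concurrently(prompts, &mut vector, client, model_parameters).await?;

    println!("{:?}", vector.get_vector());
    Ok(())
}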
Examples found in repository
examples/vectorize_texts.rs (lines 40-45)
7async fn main() -> Result<(), Error> {
8    // Load text
9    let test_text: String = "Hi, this is dim. I am here to vectorize whatever you want."
10        .to_string();
11    
12    // Create a Vector object from the text
13    let mut vector: Vector<String> = Vector::from_text(
14        test_text
15    );
16    
17    // Initialize client
18    let client: async_openai::Client<async_openai::config::OpenAIConfig> = async_openai::Client::with_config(
19        async_openai::config::OpenAIConfig::new()
20            .with_api_base("http://192.168.0.101:11434/v1") // comment this out if you use OpenAI instead of Ollama
21            .with_api_key("your_api_key")
22    );
23    
24    // Initialize prompts
25    let prompts: Vec<String> = vec![
26        "Score the sentiment intensity of the text from 1 (extremely negative) to 9 (extremely positive). Consider emotional language, tone, and context. Format your response exactly like this example: {'sentiment_score': 7}".to_string(),
27        "Rate the formality of the text from 1 (highly informal, slang-heavy) to 9 (highly formal, academic/professional). Format your response exactly like this example: {'formality_score': 4}".to_string(),
28        "Assess the emotional intensity of the text from 1 (neutral/clinical) to 9 (highly emotional, passionate, or provocative). Format your response exactly like this example: {'emotional_score': 8}".to_string(),
29        "Score how subjective the text is from 1 (purely factual/objective) to 9 (heavily opinionated/subjective). Format your response exactly like this example: {'subjectivity_score': 6}".to_string(),
30        "Rate the linguistic complexity of the text from 1 (simple vocabulary/short sentences) to 9 (dense jargon/long, intricate sentences). Format your response exactly like this example: {'complexity_score': 3}".to_string(),
31        "Score the dominant intent: 1-3 (informative/educational), 4-6 (persuasive/argumentative), 7-9 (narrative/storytelling). Format your response exactly like this example: {'intent_score': 5}".to_string(),
32        "Rate how urgent or time-sensitive the text feels from 1 (no urgency) to 9 (immediate action required). Format your response exactly like this example: {'urgency_score': 2}".to_string(),
33        "Score the specificity of details from 1 (vague/abstract) to 9 (highly specific/concrete examples). Format your response exactly like this example: {'specificity_score': 7}".to_string(),
34        "Rate the politeness of the tone from 1 (rude/confrontational) to 9 (extremely polite/deferential). Format your response exactly like this example: {'politeness_score': 8}".to_string(),
35        "Categorize the text's primary domain: 1-3 (technical/scientific), 4-6 (casual/everyday), 7-9 (artistic/creative). Format your response exactly like this example: {'domain_score': 4}".to_string(),
36    ];
37
38    // Vectorize text
39    let model_parameters = ModelParameters::new("minicpm-v".to_string(), None, None);
40    vectorize_string_concurrently(
41        prompts,
42        &mut vector, 
43        client,
44        model_parameters
45    ).await?;
46
47    // Print vectorized result
48    println!("Vector: {:?}", vector.get_vector());
49    println!("Vector Length: {:?}", vector.get_vector().len());
50    
51    Ok(())
52}
More examples
examples/vectorize_multiple_texts.rs (lines 61-66)
7async fn main() -> Result<(), Error> {
8    // Load multiple texts
9    let texts = vec![
10        "Hi, this is dim. I am here to vectorize whatever you want.".to_string(),
11        "The weather is beautiful today. Perfect for a walk outside.".to_string(),
12        "Artificial intelligence is transforming how we live and work.".to_string(),
13        "Remember to drink water and stay hydrated throughout the day.".to_string(),
14        "The quick brown fox jumps over the lazy dog.".to_string(),
15        "Programming is both an art and a science.".to_string(),
16        "Music has the power to change our moods instantly.".to_string(),
17        "Exercise regularly for better physical and mental health.".to_string(),
18        "Learning a new language opens doors to different cultures.".to_string(),
19        "Time management is essential for productivity.".to_string(),
20        "Reading books can expand your knowledge and imagination.".to_string(),
21        "Traveling allows you to experience new perspectives.".to_string(),
22        "Cooking at home can be both fun and healthy.".to_string(),
23        "Meditation helps in reducing stress and improving focus.".to_string(),
24        "Gardening is a relaxing and rewarding hobby.".to_string(),
25        "Volunteering can make a positive impact on your community.".to_string(),
26        "Photography captures moments and memories.".to_string(),
27        "Writing in a journal can help clarify your thoughts.".to_string(),
28        "Playing board games is a great way to bond with family and friends.".to_string(),
29        "Learning to play a musical instrument can be very fulfilling.".to_string(),
30    ];
31    
32    // Create Vector objects from the texts
33    let mut vectors: Vec<Vector<String>> = texts.into_iter()
34        .map(Vector::from_text)
35        .collect();
36    
37    // Initialize client
38    let client: async_openai::Client<async_openai::config::OpenAIConfig> = async_openai::Client::with_config(
39        async_openai::config::OpenAIConfig::new()
40            .with_api_base("http://192.168.0.101:11434/v1") // comment this out if you use OpenAI instead of Ollama
41            .with_api_key("your_api_key")
42    );
43    
44    // Initialize prompts
45    let prompts: Vec<String> = vec![
46        "Score the sentiment intensity of the text from 1 (extremely negative) to 9 (extremely positive). Consider emotional language, tone, and context. Format your response exactly like this example: {'sentiment_score': 7}".to_string(),
47        "Rate the formality of the text from 1 (highly informal, slang-heavy) to 9 (highly formal, academic/professional). Format your response exactly like this example: {'formality_score': 4}".to_string(),
48        "Assess the emotional intensity of the text from 1 (neutral/clinical) to 9 (highly emotional, passionate, or provocative). Format your response exactly like this example: {'emotional_score': 8}".to_string(),
49        "Score how subjective the text is from 1 (purely factual/objective) to 9 (heavily opinionated/subjective). Format your response exactly like this example: {'subjectivity_score': 6}".to_string(),
50        "Rate the linguistic complexity of the text from 1 (simple vocabulary/short sentences) to 9 (dense jargon/long, intricate sentences). Format your response exactly like this example: {'complexity_score': 3}".to_string(),
51        "Score the dominant intent: 1-3 (informative/educational), 4-6 (persuasive/argumentative), 7-9 (narrative/storytelling). Format your response exactly like this example: {'intent_score': 5}".to_string(),
52        "Rate how urgent or time-sensitive the text feels from 1 (no urgency) to 9 (immediate action required). Format your response exactly like this example: {'urgency_score': 2}".to_string(),
53        "Score the specificity of details from 1 (vague/abstract) to 9 (highly specific/concrete examples). Format your response exactly like this example: {'specificity_score': 7}".to_string(),
54        "Rate the politeness of the tone from 1 (rude/confrontational) to 9 (extremely polite/deferential). Format your response exactly like this example: {'politeness_score': 8}".to_string(),
55        "Categorize the text's primary domain: 1-3 (technical/scientific), 4-6 (casual/everyday), 7-9 (artistic/creative). Format your response exactly like this example: {'domain_score': 4}".to_string(),
56    ];
57
58    // Vectorize all texts
59    for vector in &mut vectors {
60        let model_parameters = ModelParameters::new("mistral".to_string(), None, None);
61        vectorize_string_concurrently(
62            prompts.clone(),
63            vector,
64            client.clone(),
65            model_parameters
66        ).await?;
67    }
68
69    // Print statistics and validate vectors
70    println!("\n=== Vectorization Results ===\n");
71    
72    // Get all vector lengths
73    let lengths: Vec<usize> = vectors.iter()
74        .map(|v| v.get_vector().len())
75        .collect();
76    
77    // Validate that all vectors have the same length
78    let first_len = lengths[0];
79    let all_same_length = lengths.iter().all(|&len| len == first_len);
80
81    // Print results for each vector
82    for (i, vector) in vectors.iter().enumerate() {
83        println!("Text #{}", i + 1);
84        println!("Vector: {:?}", vector.get_vector());
85        println!("Length: {}", vector.get_vector().len());
86        println!();
87    }
88
89    // Print validation results
90    println!("=== Validation ===");
91    println!("All vectors have same length: {}", all_same_length);
92    println!("Vector dimension: {}", first_len);
93    
94    if !all_same_length {
95        println!("WARNING: Inconsistent vector lengths detected!");
96        println!("Lengths: {:?}", lengths);
97    }
98
99    Ok(())
100}
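The loop in the example above vectorizes the texts one after another, awaiting each call before starting the next. As an illustrative variation (not part of this crate's API), the sketch below runs the per-text calls concurrently as well; it assumes the futures crate for try_join_all and a hypothetical helper name, while reusing the crate items and the "mistral" model from the example.

use async_openai::{config::OpenAIConfig, Client};
use futures::future::try_join_all;

async fn vectorize_all_texts(
    vectors: &mut [Vector<String>],
    prompts: &[String],
    client: Client<OpenAIConfig>,
) -> Result<(), Error> {
    // Each future owns its prompt set, a cloned client, and a disjoint
    // &mut Vector obtained via iter_mut, so they can all be awaited together.
    let jobs = vectors.iter_mut().map(|vector| {
        let model_parameters = ModelParameters::new("mistral".to_string(), None, None);
        vectorize_string_concurrently(prompts.to_vec(), vector, client.clone(), model_parameters)
    });

    // try_join_all returns the first error it sees, matching the `?` in the sequential loop.
    try_join_all(jobs).await?;
    Ok(())
}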