Skip to main content

qdrant_client/builders/
relevance_feedback_input_builder.rs

1use crate::qdrant::*;
2
/// Builder for [`RelevanceFeedbackInput`].
///
/// Collects a target vector, optional feedback items, and an optional
/// strategy, then assembles them via [`RelevanceFeedbackInputBuilder::build`].
#[derive(Clone)]
pub struct RelevanceFeedbackInputBuilder {
    /// The original query vector
    pub(crate) target: VectorInput,
    /// Previous results scored by the feedback provider
    pub(crate) feedback: Vec<FeedbackItem>,
    /// Formula and trained coefficients to use
    pub(crate) strategy: Option<FeedbackStrategy>,
}
12
13impl RelevanceFeedbackInputBuilder {
14    /// Create a new builder with a target vector.
15    ///
16    /// # Arguments
17    ///
18    /// * `target` - The original query vector to search around.
19    ///
20    /// # Examples
21    ///
22    /// ```
23    /// use qdrant_client::qdrant::{RelevanceFeedbackInputBuilder, VectorInput};
24    ///
25    /// let builder = RelevanceFeedbackInputBuilder::new(VectorInput::new_dense(vec![0.1, 0.2, 0.3]));
26    /// ```
27    pub fn new(target: impl Into<VectorInput>) -> Self {
28        Self {
29            target: target.into(),
30            feedback: Vec::new(),
31            strategy: None,
32        }
33    }
34
35    /// Add a single feedback item.
36    pub fn add_feedback(mut self, item: impl Into<FeedbackItem>) -> Self {
37        self.feedback.push(item.into());
38        self
39    }
40
41    /// Set the feedback strategy.
42    pub fn strategy(mut self, value: impl Into<FeedbackStrategy>) -> Self {
43        self.strategy = Some(value.into());
44        self
45    }
46
47    /// Builds the desired type. Can often be omitted.
48    pub fn build(self) -> RelevanceFeedbackInput {
49        RelevanceFeedbackInput {
50            target: Some(self.target),
51            feedback: self.feedback,
52            strategy: self.strategy,
53        }
54    }
55}
56
57impl From<RelevanceFeedbackInputBuilder> for RelevanceFeedbackInput {
58    fn from(value: RelevanceFeedbackInputBuilder) -> Self {
59        value.build()
60    }
61}