1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
/*
* opensearch-client
*
* Rust Client for OpenSearch
*
* The version of the OpenAPI document: 3.1.0
* Contact: alberto.paro@gmail.com
* Generated by Paro OpenAPI Generator
*/
use crate::common;
use crate::indices;
use serde::{Deserialize, Serialize};
/// Request body for the indices analyze API.
///
/// Defines the analyzer/tokenizer parameters and the text on which the
/// analysis should be performed. Every field is optional; unset (`None`)
/// fields are omitted from the serialized JSON via `skip_serializing_if`.
///
/// NOTE: the previous `#[serde(rename = "...")]` attributes all renamed each
/// field to its own Rust name, which is a no-op — serde uses the field name
/// by default — so they have been removed with no change to the wire format.
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct AnalyzeRequestBodyJson {
  /// The name of the analyzer that should be applied to the provided `text`.
  /// This could be a built-in analyzer, or an analyzer that's been configured in the index.
  #[serde(default, skip_serializing_if = "Option::is_none")]
  pub analyzer: Option<String>,
  /// The path to a field or an array of paths. Some APIs support wildcards in the path, which allows you to select multiple fields.
  #[serde(default, skip_serializing_if = "Option::is_none")]
  pub field: Option<String>,
  /// Array of token attributes used to filter the output of the `explain` parameter.
  #[serde(default, skip_serializing_if = "Option::is_none")]
  pub attributes: Option<Vec<String>>,
  /// Array of character filters used to preprocess characters before the tokenizer.
  #[serde(default, skip_serializing_if = "Option::is_none")]
  pub char_filter: Option<Vec<common::analysis::CharFilter>>,
  /// Tokenizer to use to split the text into tokens.
  /// NOTE(review): semantics come from `common::analysis::Tokenizer` — confirm against that type.
  #[serde(default, skip_serializing_if = "Option::is_none")]
  pub tokenizer: Option<common::analysis::Tokenizer>,
  /// Array of token filters used to apply after the tokenizer.
  #[serde(default, skip_serializing_if = "Option::is_none")]
  pub filter: Option<Vec<common::analysis::TokenFilter>>,
  /// Text on which the analysis should be performed.
  /// NOTE(review): may be a single string or an array — see `indices::analyze::TextToAnalyze`.
  #[serde(default, skip_serializing_if = "Option::is_none")]
  pub text: Option<indices::analyze::TextToAnalyze>,
  /// If `true`, the response includes token attributes and additional details.
  #[serde(default, skip_serializing_if = "Option::is_none")]
  pub explain: Option<bool>,
  /// Normalizer to use to convert text into a single token.
  #[serde(default, skip_serializing_if = "Option::is_none")]
  pub normalizer: Option<String>,
}
impl AnalyzeRequestBodyJson {
/// Define analyzer/tokenizer parameters and the text on which the analysis should be performed
pub fn new() -> AnalyzeRequestBodyJson {
AnalyzeRequestBodyJson {
analyzer: None,
field: None,
attributes: None,
char_filter: None,
tokenizer: None,
filter: None,
text: None,
explain: None,
normalizer: None,
}
}
}