gemini_rust/safety/model.rs
use serde::{Deserialize, Serialize};

/// A safety setting that controls how content in a given harm category is filtered
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SafetySetting {
    /// The category of content to filter
    pub category: HarmCategory,
    /// The threshold for filtering
    pub threshold: HarmBlockThreshold,
}

/// Category of harmful content
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum HarmCategory {
    /// Category is unspecified.
    #[serde(rename = "HARM_CATEGORY_UNSPECIFIED")]
    Unspecified,
    /// PaLM - Negative or harmful comments targeting identity and/or protected attribute.
    #[serde(rename = "HARM_CATEGORY_DEROGATORY")]
    Derogatory,
    /// PaLM - Content that is rude, disrespectful, or profane.
    #[serde(rename = "HARM_CATEGORY_TOXICITY")]
    Toxicity,
    /// PaLM - Describes scenarios depicting violence against an individual or group, or general descriptions of gore.
    #[serde(rename = "HARM_CATEGORY_VIOLENCE")]
    Violence,
    /// PaLM - Contains references to sexual acts or other lewd content.
    #[serde(rename = "HARM_CATEGORY_SEXUAL")]
    Sexual,
    /// PaLM - Promotes unchecked medical advice.
    #[serde(rename = "HARM_CATEGORY_MEDICAL")]
    Medical,
    /// PaLM - Dangerous content that promotes, facilitates, or encourages harmful acts.
    #[serde(rename = "HARM_CATEGORY_DANGEROUS")]
    Dangerous,
    /// Gemini - Harassment content.
    #[serde(rename = "HARM_CATEGORY_HARASSMENT")]
    Harassment,
    /// Gemini - Hate speech and content.
    #[serde(rename = "HARM_CATEGORY_HATE_SPEECH")]
    HateSpeech,
    /// Gemini - Sexually explicit content.
    #[serde(rename = "HARM_CATEGORY_SEXUALLY_EXPLICIT")]
    SexuallyExplicit,
    /// Gemini - Dangerous content.
    #[serde(rename = "HARM_CATEGORY_DANGEROUS_CONTENT")]
    DangerousContent,
}

/// Threshold for blocking harmful content
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum HarmBlockThreshold {
    /// Threshold is unspecified.
    HarmBlockThresholdUnspecified,
    /// Content with NEGLIGIBLE will be allowed.
    BlockLowAndAbove,
    /// Content with NEGLIGIBLE and LOW will be allowed.
    BlockMediumAndAbove,
    /// Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
    BlockOnlyHigh,
    /// All content will be allowed.
    BlockNone,
    /// Turn off the safety filter.
    Off,
}
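
// A minimal serialization sketch, assuming `serde_json` is available as a
// dev-dependency (not declared in this file). It illustrates how the serde
// attributes above map a `SafetySetting` to the SCREAMING_SNAKE_CASE wire
// names; the module/test names and the chosen category/threshold pair are
// illustrative, not part of the library's API.
#[cfg(test)]
mod safety_setting_serde_tests {
    use super::*;

    #[test]
    fn safety_setting_serializes_to_wire_names() {
        let setting = SafetySetting {
            category: HarmCategory::HateSpeech,
            threshold: HarmBlockThreshold::BlockMediumAndAbove,
        };
        // `rename` / `rename_all` turn the Rust variants into the API's enum strings.
        let json = serde_json::to_value(&setting).expect("SafetySetting should serialize");
        assert_eq!(json["category"], "HARM_CATEGORY_HATE_SPEECH");
        assert_eq!(json["threshold"], "BLOCK_MEDIUM_AND_ABOVE");
    }
}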

/// Probability that content is harmful
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum HarmProbability {
    /// Probability is unspecified.
    HarmProbabilityUnspecified,
    /// Content has a negligible chance of being unsafe.
    Negligible,
    /// Content has a low chance of being unsafe.
    Low,
    /// Content has a medium chance of being unsafe.
    Medium,
    /// Content has a high chance of being unsafe.
    High,
}

/// Safety rating for content
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SafetyRating {
    /// The category of the safety rating
    pub category: HarmCategory,
    /// The probability that the content is harmful
    pub probability: HarmProbability,
}
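
// A minimal deserialization sketch, again assuming `serde_json` as a
// dev-dependency. The JSON snippet is illustrative only: it mirrors the shape
// modelled by `SafetyRating` above, not a captured API response.
#[cfg(test)]
mod safety_rating_serde_tests {
    use super::*;

    #[test]
    fn safety_rating_deserializes_from_wire_names() {
        let json = r#"{
            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
            "probability": "NEGLIGIBLE"
        }"#;
        let rating: SafetyRating = serde_json::from_str(json).expect("SafetyRating should deserialize");
        assert_eq!(rating.category, HarmCategory::DangerousContent);
        assert_eq!(rating.probability, HarmProbability::Negligible);
    }
}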