gemini_rust/safety/model.rs
use serde::{Deserialize, Serialize};

/// Safety setting that controls how content in a given harm category is filtered.
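///
/// A minimal construction sketch (the import path is assumed from this
/// file's location in the crate and may differ):
///
/// ```ignore
/// use gemini_rust::safety::model::{HarmBlockThreshold, HarmCategory, SafetySetting};
///
/// let setting = SafetySetting {
///     category: HarmCategory::Harassment,
///     threshold: HarmBlockThreshold::BlockMediumAndAbove,
/// };
/// ```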
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SafetySetting {
    /// The category of content to filter.
    pub category: HarmCategory,
    /// The threshold above which content is blocked.
    pub threshold: HarmBlockThreshold,
}

/// Category of harmful content.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum HarmCategory {
    /// Category is unspecified.
    #[serde(rename = "HARM_CATEGORY_UNSPECIFIED")]
    Unspecified,
    /// PaLM - Negative or harmful comments targeting identity and/or protected attributes.
    #[serde(rename = "HARM_CATEGORY_DEROGATORY")]
    Derogatory,
    /// PaLM - Content that is rude, disrespectful, or profane.
    #[serde(rename = "HARM_CATEGORY_TOXICITY")]
    Toxicity,
    /// PaLM - Scenarios depicting violence against an individual or group, or general descriptions of gore.
    #[serde(rename = "HARM_CATEGORY_VIOLENCE")]
    Violence,
    /// PaLM - References to sexual acts or other lewd content.
    #[serde(rename = "HARM_CATEGORY_SEXUAL")]
    Sexual,
    /// PaLM - Promotes unchecked medical advice.
    #[serde(rename = "HARM_CATEGORY_MEDICAL")]
    Medical,
    /// PaLM - Dangerous content that promotes, facilitates, or encourages harmful acts.
    #[serde(rename = "HARM_CATEGORY_DANGEROUS")]
    Dangerous,
    /// Gemini - Harassment content.
    #[serde(rename = "HARM_CATEGORY_HARASSMENT")]
    Harassment,
    /// Gemini - Hate speech and hateful content.
    #[serde(rename = "HARM_CATEGORY_HATE_SPEECH")]
    HateSpeech,
    /// Gemini - Sexually explicit content.
    #[serde(rename = "HARM_CATEGORY_SEXUALLY_EXPLICIT")]
    SexuallyExplicit,
    /// Gemini - Dangerous content.
    #[serde(rename = "HARM_CATEGORY_DANGEROUS_CONTENT")]
    DangerousContent,
    /// Gemini - Content that may be used to harm civic integrity, such as misinformation about voting, elections, or democratic processes, or content that undermines trust in civic institutions.
    #[serde(rename = "HARM_CATEGORY_CIVIC_INTEGRITY")]
    CivicIntegrity,
}
52
/// Threshold for blocking harmful content.
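///
/// Variants serialize to the REST API's `SCREAMING_SNAKE_CASE` names via the
/// `rename_all` attribute below; a sketch, assuming `serde_json` is available
/// and the module path used above:
///
/// ```ignore
/// use gemini_rust::safety::model::HarmBlockThreshold;
///
/// let json = serde_json::to_string(&HarmBlockThreshold::BlockMediumAndAbove).unwrap();
/// assert_eq!(json, "\"BLOCK_MEDIUM_AND_ABOVE\"");
/// ```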
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum HarmBlockThreshold {
    /// Threshold is unspecified.
    HarmBlockThresholdUnspecified,
    /// Only content rated NEGLIGIBLE is allowed.
    BlockLowAndAbove,
    /// Content rated NEGLIGIBLE or LOW is allowed.
    BlockMediumAndAbove,
    /// Content rated NEGLIGIBLE, LOW, or MEDIUM is allowed.
    BlockOnlyHigh,
    /// All content is allowed.
    BlockNone,
    /// Turn off the safety filter.
    Off,
}

/// Probability that content is harmful.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum HarmProbability {
    /// Probability is unspecified.
    HarmProbabilityUnspecified,
    /// Content has a negligible chance of being unsafe.
    Negligible,
    /// Content has a low chance of being unsafe.
    Low,
    /// Content has a medium chance of being unsafe.
    Medium,
    /// Content has a high chance of being unsafe.
    High,
}

/// Safety rating for content.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SafetyRating {
    /// The category of the safety rating.
    pub category: HarmCategory,
    /// The probability that the content is harmful.
    pub probability: HarmProbability,
}
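
// Round-trip sketches for the wire format. These assume `serde_json` is
// available as a dev-dependency; the JSON names mirror the serde attributes
// declared above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn harm_category_uses_rest_names() {
        // Explicit `#[serde(rename = ...)]` maps the variant to its REST name.
        let json = serde_json::to_string(&HarmCategory::HateSpeech).unwrap();
        assert_eq!(json, "\"HARM_CATEGORY_HATE_SPEECH\"");
    }

    #[test]
    fn safety_rating_deserializes_from_response_json() {
        // Shape mirrors a `safetyRatings` entry in an API response.
        let json = r#"{"category":"HARM_CATEGORY_HARASSMENT","probability":"NEGLIGIBLE"}"#;
        let rating: SafetyRating = serde_json::from_str(json).unwrap();
        assert_eq!(rating.category, HarmCategory::Harassment);
        assert_eq!(rating.probability, HarmProbability::Negligible);
    }
}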