use crate::{
error::OpenAIError,
types::{CreateModerationRequest, CreateModerationResponse},
Client,
};
/// Given an input text, outputs whether the model classifies it as violating OpenAI's content policy.
///
/// Related guide: [Moderations](https://beta.openai.com/docs/guides/moderation/overview)
pub struct Moderations<'c> {
    // Borrowed API client used to issue HTTP requests; lifetime `'c` ties
    // this handle to the client that created it.
    client: &'c Client,
}
impl<'c> Moderations<'c> {
pub fn new(client: &'c Client) -> Self {
Self { client }
}
/// Classifies if text violates OpenAI's Content Policy
pub async fn create(
&self,
request: CreateModerationRequest,
) -> Result<CreateModerationResponse, OpenAIError> {
self.client.post("/moderations", request).await
}
}