/// Request message for the `AnnotateVideo` RPC (see
/// `video_intelligence_service_client` at the bottom of this file).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnotateVideoRequest {
/// Location of the video to annotate. NOTE(review): presumably a Cloud
/// Storage URI, used when `input_content` is empty — confirm against the
/// upstream proto.
#[prost(string, tag = "1")]
pub input_uri: ::prost::alloc::string::String,
/// Raw video bytes, the in-band alternative to `input_uri`. Note the
/// non-sequential wire tag 6 (field was added after the others).
#[prost(bytes = "vec", tag = "6")]
pub input_content: ::prost::alloc::vec::Vec<u8>,
/// Requested analysis features. Stored as raw `i32`s per prost convention;
/// interpret via the `Feature` enum (unknown values are preserved).
#[prost(enumeration = "Feature", repeated, packed = "false", tag = "2")]
pub features: ::prost::alloc::vec::Vec<i32>,
/// Optional per-feature configuration for this request.
#[prost(message, optional, tag = "3")]
pub video_context: ::core::option::Option<VideoContext>,
/// Optional location to which results are written. NOTE(review):
/// presumably a Cloud Storage URI — confirm against the upstream proto.
#[prost(string, tag = "4")]
pub output_uri: ::prost::alloc::string::String,
/// Optional region identifier for where processing runs. NOTE(review):
/// exact semantics not visible here — confirm against the upstream proto.
#[prost(string, tag = "5")]
pub location_id: ::prost::alloc::string::String,
}
/// Per-request configuration, carried in `AnnotateVideoRequest.video_context`.
/// Holds one optional config message per detection feature.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VideoContext {
/// Video segments to process. NOTE(review): behavior when empty (whole
/// video?) is not visible here — confirm against the upstream proto.
#[prost(message, repeated, tag = "1")]
pub segments: ::prost::alloc::vec::Vec<VideoSegment>,
/// Config for `Feature::LabelDetection`.
#[prost(message, optional, tag = "2")]
pub label_detection_config: ::core::option::Option<LabelDetectionConfig>,
/// Config for `Feature::ShotChangeDetection`.
#[prost(message, optional, tag = "3")]
pub shot_change_detection_config: ::core::option::Option<ShotChangeDetectionConfig>,
/// Config for `Feature::ExplicitContentDetection`.
#[prost(message, optional, tag = "4")]
pub explicit_content_detection_config: ::core::option::Option<ExplicitContentDetectionConfig>,
/// Config for `Feature::FaceDetection`.
#[prost(message, optional, tag = "5")]
pub face_detection_config: ::core::option::Option<FaceDetectionConfig>,
}
/// Configuration for label detection (`Feature::LabelDetection`).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LabelDetectionConfig {
/// Detection granularity; raw `i32` interpreted via `LabelDetectionMode`.
#[prost(enumeration = "LabelDetectionMode", tag = "1")]
pub label_detection_mode: i32,
/// Hint that the camera is stationary. NOTE(review): effect on detection
/// is not visible here — confirm against the upstream proto.
#[prost(bool, tag = "2")]
pub stationary_camera: bool,
/// Model name to use; empty string selects the service default
/// (protobuf string default) — presumably, confirm upstream.
#[prost(string, tag = "3")]
pub model: ::prost::alloc::string::String,
}
/// Configuration for shot-change detection (`Feature::ShotChangeDetection`).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ShotChangeDetectionConfig {
/// Model name to use; empty string is the protobuf default.
#[prost(string, tag = "1")]
pub model: ::prost::alloc::string::String,
}
/// Configuration for explicit-content detection
/// (`Feature::ExplicitContentDetection`).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExplicitContentDetectionConfig {
/// Model name to use; empty string is the protobuf default.
#[prost(string, tag = "1")]
pub model: ::prost::alloc::string::String,
}
/// Configuration for face detection (`Feature::FaceDetection`).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FaceDetectionConfig {
/// Model name to use; empty string is the protobuf default.
#[prost(string, tag = "1")]
pub model: ::prost::alloc::string::String,
/// Whether results should include bounding boxes (see
/// `FaceFrame.normalized_bounding_boxes`).
#[prost(bool, tag = "2")]
pub include_bounding_boxes: bool,
}
/// A time range within the video, expressed as offsets from the start.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VideoSegment {
/// Offset of the segment start from the beginning of the video.
#[prost(message, optional, tag = "1")]
pub start_time_offset: ::core::option::Option<::prost_types::Duration>,
/// Offset of the segment end from the beginning of the video.
#[prost(message, optional, tag = "2")]
pub end_time_offset: ::core::option::Option<::prost_types::Duration>,
}
/// A video segment in which a label was detected, with a confidence score.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LabelSegment {
/// The segment where the label appears.
#[prost(message, optional, tag = "1")]
pub segment: ::core::option::Option<VideoSegment>,
/// Detection confidence. NOTE(review): presumably in [0, 1] — range not
/// visible here, confirm against the upstream proto.
#[prost(float, tag = "2")]
pub confidence: f32,
}
/// A single video frame in which a label was detected.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LabelFrame {
/// Frame location as an offset from the start of the video.
#[prost(message, optional, tag = "1")]
pub time_offset: ::core::option::Option<::prost_types::Duration>,
/// Detection confidence. NOTE(review): presumably in [0, 1] — confirm.
#[prost(float, tag = "2")]
pub confidence: f32,
}
/// A detected entity (the "what" of a label annotation).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Entity {
/// Opaque entity identifier. NOTE(review): presumably a Google Knowledge
/// Graph id — not visible here, confirm against the upstream proto.
#[prost(string, tag = "1")]
pub entity_id: ::prost::alloc::string::String,
/// Human-readable description of the entity.
#[prost(string, tag = "2")]
pub description: ::prost::alloc::string::String,
/// Language of `description` (BCP-47 style code — presumably; confirm).
#[prost(string, tag = "3")]
pub language_code: ::prost::alloc::string::String,
}
/// A label detected in the video, with where it occurs (segments and/or
/// frames, depending on the requested `LabelDetectionMode`).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LabelAnnotation {
/// The detected entity itself.
#[prost(message, optional, tag = "1")]
pub entity: ::core::option::Option<Entity>,
/// Broader category entities for `entity` (may be empty).
#[prost(message, repeated, tag = "2")]
pub category_entities: ::prost::alloc::vec::Vec<Entity>,
/// Segment-level occurrences with confidences.
#[prost(message, repeated, tag = "3")]
pub segments: ::prost::alloc::vec::Vec<LabelSegment>,
/// Frame-level occurrences with confidences.
#[prost(message, repeated, tag = "4")]
pub frames: ::prost::alloc::vec::Vec<LabelFrame>,
}
/// Explicit-content likelihood for a single video frame.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExplicitContentFrame {
/// Frame location as an offset from the start of the video.
#[prost(message, optional, tag = "1")]
pub time_offset: ::core::option::Option<::prost_types::Duration>,
/// Likelihood of pornographic content; raw `i32` interpreted via the
/// `Likelihood` enum.
#[prost(enumeration = "Likelihood", tag = "2")]
pub pornography_likelihood: i32,
}
/// Explicit-content results: one entry per analyzed frame.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExplicitContentAnnotation {
/// Per-frame likelihoods, in video order — presumably; ordering is not
/// guaranteed by anything visible here.
#[prost(message, repeated, tag = "1")]
pub frames: ::prost::alloc::vec::Vec<ExplicitContentFrame>,
}
/// A bounding box with edge coordinates. NOTE(review): per the type name,
/// presumably normalized to [0, 1] relative to frame size — confirm against
/// the upstream proto.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NormalizedBoundingBox {
/// Left edge.
#[prost(float, tag = "1")]
pub left: f32,
/// Top edge.
#[prost(float, tag = "2")]
pub top: f32,
/// Right edge.
#[prost(float, tag = "3")]
pub right: f32,
/// Bottom edge.
#[prost(float, tag = "4")]
pub bottom: f32,
}
/// A video segment during which a face was detected.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FaceSegment {
/// The segment where the face appears.
#[prost(message, optional, tag = "1")]
pub segment: ::core::option::Option<VideoSegment>,
}
/// Face location(s) within a single video frame.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FaceFrame {
/// Bounding boxes for the face in this frame; populated when
/// `FaceDetectionConfig.include_bounding_boxes` is set — presumably,
/// confirm against the upstream proto.
#[prost(message, repeated, tag = "1")]
pub normalized_bounding_boxes: ::prost::alloc::vec::Vec<NormalizedBoundingBox>,
/// Frame location as an offset from the start of the video.
#[prost(message, optional, tag = "2")]
pub time_offset: ::core::option::Option<::prost_types::Duration>,
}
/// One detected face, with a thumbnail plus segment- and frame-level info.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FaceAnnotation {
/// Thumbnail image bytes of the face. NOTE(review): image format is not
/// visible here — confirm against the upstream proto.
#[prost(bytes = "vec", tag = "1")]
pub thumbnail: ::prost::alloc::vec::Vec<u8>,
/// Segments in which this face appears.
#[prost(message, repeated, tag = "2")]
pub segments: ::prost::alloc::vec::Vec<FaceSegment>,
/// Per-frame locations of this face.
#[prost(message, repeated, tag = "3")]
pub frames: ::prost::alloc::vec::Vec<FaceFrame>,
}
/// Annotation results for a single video; one field per requested feature
/// (unrequested features leave their field empty/`None`).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VideoAnnotationResults {
/// The video these results refer to (echoes the request's input).
#[prost(string, tag = "1")]
pub input_uri: ::prost::alloc::string::String,
/// Label annotations at segment level.
#[prost(message, repeated, tag = "2")]
pub segment_label_annotations: ::prost::alloc::vec::Vec<LabelAnnotation>,
/// Label annotations at shot level.
#[prost(message, repeated, tag = "3")]
pub shot_label_annotations: ::prost::alloc::vec::Vec<LabelAnnotation>,
/// Label annotations at frame level.
#[prost(message, repeated, tag = "4")]
pub frame_label_annotations: ::prost::alloc::vec::Vec<LabelAnnotation>,
/// Face detection results.
#[prost(message, repeated, tag = "5")]
pub face_annotations: ::prost::alloc::vec::Vec<FaceAnnotation>,
/// Shot boundaries detected in the video.
#[prost(message, repeated, tag = "6")]
pub shot_annotations: ::prost::alloc::vec::Vec<VideoSegment>,
/// Explicit-content results.
#[prost(message, optional, tag = "7")]
pub explicit_annotation: ::core::option::Option<ExplicitContentAnnotation>,
/// Error status if annotation failed. Wire tag 8 is deliberately skipped
/// in the upstream proto (reserved/removed field) — do not reuse it.
#[prost(message, optional, tag = "9")]
pub error: ::core::option::Option<super::super::super::rpc::Status>,
}
/// Final result of `AnnotateVideo`, delivered via the long-running
/// `Operation.response` (per the RPC doc comment in the client module below).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnotateVideoResponse {
/// One entry per video processed in the request.
#[prost(message, repeated, tag = "1")]
pub annotation_results: ::prost::alloc::vec::Vec<VideoAnnotationResults>,
}
/// Progress metadata for annotating a single video.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VideoAnnotationProgress {
/// The video this progress refers to (echoes the request's input).
#[prost(string, tag = "1")]
pub input_uri: ::prost::alloc::string::String,
/// Completion percentage. NOTE(review): presumably 0-100 — confirm.
#[prost(int32, tag = "2")]
pub progress_percent: i32,
/// When processing started.
#[prost(message, optional, tag = "3")]
pub start_time: ::core::option::Option<::prost_types::Timestamp>,
/// When this progress value was last updated.
#[prost(message, optional, tag = "4")]
pub update_time: ::core::option::Option<::prost_types::Timestamp>,
}
/// Progress of `AnnotateVideo`, delivered via the long-running
/// `Operation.metadata` (per the RPC doc comment in the client module below).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnotateVideoProgress {
/// One entry per video being processed in the request.
#[prost(message, repeated, tag = "1")]
pub annotation_progress: ::prost::alloc::vec::Vec<VideoAnnotationProgress>,
}
/// Analysis feature selector for `AnnotateVideoRequest.features`.
/// Discriminant values are the protobuf wire values — do not renumber.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Feature {
/// Proto default; no feature selected.
Unspecified = 0,
LabelDetection = 1,
ShotChangeDetection = 2,
ExplicitContentDetection = 3,
FaceDetection = 4,
}
/// Granularity of label detection (segment/shot vs. per-frame), used by
/// `LabelDetectionConfig.label_detection_mode`.
/// Discriminant values are the protobuf wire values — do not renumber.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum LabelDetectionMode {
/// Proto default; server-side default mode applies — presumably, confirm.
Unspecified = 0,
ShotMode = 1,
FrameMode = 2,
/// Both shot-level and frame-level detection.
ShotAndFrameMode = 3,
}
/// Bucketed likelihood scale used by
/// `ExplicitContentFrame.pornography_likelihood`. Ordered from least to most
/// likely; discriminants are the protobuf wire values — do not renumber.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Likelihood {
/// Proto default; likelihood not determined.
Unspecified = 0,
VeryUnlikely = 1,
Unlikely = 2,
Possible = 3,
Likely = 4,
VeryLikely = 5,
}
#[doc = r" Generated client implementations."]
// tonic-generated gRPC client wrapper. Code is machine-generated; only
// comments are added here — do not hand-edit the generated logic.
pub mod video_intelligence_service_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
#[doc = " Service that implements Google Cloud Video Intelligence API."]
#[derive(Debug, Clone)]
pub struct VideoIntelligenceServiceClient<T> {
// Underlying tonic gRPC channel/service wrapper.
inner: tonic::client::Grpc<T>,
}
impl<T> VideoIntelligenceServiceClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::ResponseBody: Body + Send + 'static,
T::Error: Into<StdError>,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
/// Wraps an already-connected transport (e.g. a tonic `Channel`) in a client.
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
/// Builds a client whose requests pass through `interceptor` first
/// (typically used to attach auth metadata).
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> VideoIntelligenceServiceClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
Into<StdError> + Send + Sync,
{
VideoIntelligenceServiceClient::new(InterceptedService::new(inner, interceptor))
}
#[doc = r" Compress requests with `gzip`."]
#[doc = r""]
#[doc = r" This requires the server to support it otherwise it might respond with an"]
#[doc = r" error."]
pub fn send_gzip(mut self) -> Self {
self.inner = self.inner.send_gzip();
self
}
#[doc = r" Enable decompressing responses with `gzip`."]
pub fn accept_gzip(mut self) -> Self {
self.inner = self.inner.accept_gzip();
self
}
#[doc = " Performs asynchronous video annotation. Progress and results can be"]
#[doc = " retrieved through the `google.longrunning.Operations` interface."]
#[doc = " `Operation.metadata` contains `AnnotateVideoProgress` (progress)."]
#[doc = " `Operation.response` contains `AnnotateVideoResponse` (results)."]
pub async fn annotate_video(
&mut self,
request: impl tonic::IntoRequest<super::AnnotateVideoRequest>,
) -> Result<
tonic::Response<super::super::super::super::longrunning::Operation>,
tonic::Status,
> {
// Wait until the underlying service is ready to accept a request;
// readiness failures surface as `Code::Unknown`.
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
// Prost codec + fully-qualified gRPC method path for this RPC.
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.videointelligence.v1beta2.VideoIntelligenceService/AnnotateVideo",
);
self.inner.unary(request.into_request(), path, codec).await
}
}
}