#[cfg(test)]
mod tests;
use crate::data_structures::{AppData, RtpPacketTraceInfo, SsrcTraceInfo, TraceEventDirection};
use crate::messages::{
ConsumerCloseRequest, ConsumerDumpRequest, ConsumerEnableTraceEventRequest,
ConsumerGetStatsRequest, ConsumerPauseRequest, ConsumerRequestKeyFrameRequest,
ConsumerResumeRequest, ConsumerSetPreferredLayersRequest, ConsumerSetPriorityRequest,
};
use crate::producer::{Producer, ProducerId, ProducerStat, ProducerType, WeakProducer};
use crate::rtp_parameters::{MediaKind, MimeType, RtpCapabilities, RtpParameters};
use crate::scalability_modes::ScalabilityMode;
use crate::transport::Transport;
use crate::uuid_based_wrapper_type;
use crate::worker::{Channel, PayloadChannel, RequestError, SubscriptionHandler};
use async_executor::Executor;
use event_listener_primitives::{Bag, BagOnce, HandlerId};
use log::{debug, error};
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::fmt::Debug;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
// Generates `ConsumerId`, a UUID-based wrapper type uniquely identifying a consumer.
uuid_based_wrapper_type!(ConsumerId);
/// Spatial/temporal layers of a consumer.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ConsumerLayers {
    /// Spatial layer index.
    pub spatial_layer: u8,
    /// Temporal layer index, if any.
    pub temporal_layer: Option<u8>,
}
/// Score of the consumer and its associated producer, as reported by the worker.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ConsumerScore {
    /// Score of the RTP stream sent to the consuming endpoint.
    pub score: u8,
    /// Score of the currently selected RTP stream of the producer.
    pub producer_score: u8,
    /// Scores of all RTP streams of the producer.
    pub producer_scores: Vec<u8>,
}
/// Options used when creating a consumer; see [`ConsumerOptions::new`] for the defaults.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct ConsumerOptions {
    /// Id of the producer to consume.
    pub producer_id: ProducerId,
    /// RTP capabilities of the consuming endpoint.
    pub rtp_capabilities: RtpCapabilities,
    /// Whether the consumer starts paused.
    pub paused: bool,
    /// Preferred MID for the consumer, if any.
    pub mid: Option<String>,
    /// Initially preferred spatial/temporal layers, if any.
    pub preferred_layers: Option<ConsumerLayers>,
    /// Whether DTX is ignored (NOTE(review): semantics inferred from the name;
    /// confirm against worker documentation).
    pub ignore_dtx: bool,
    /// Whether this is a pipe consumer (NOTE(review): semantics inferred from
    /// the name; confirm against worker documentation).
    pub pipe: bool,
    /// Custom application data attached to the consumer.
    pub app_data: AppData,
}
impl ConsumerOptions {
    /// Builds consumer options from the mandatory producer id and RTP
    /// capabilities; every other field starts out with its default value
    /// (not paused, no MID, no preferred layers, DTX honored, not a pipe
    /// consumer, empty app data).
    #[must_use]
    pub fn new(producer_id: ProducerId, rtp_capabilities: RtpCapabilities) -> Self {
        Self {
            producer_id,
            rtp_capabilities,
            mid: None,
            preferred_layers: None,
            paused: false,
            pipe: false,
            ignore_dtx: false,
            app_data: AppData::default(),
        }
    }
}
/// Parameters of a single RTP stream as reported by the worker in dumps.
#[derive(Debug, Clone, PartialOrd, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[doc(hidden)]
pub struct RtpStreamParams {
    pub clock_rate: u32,
    pub cname: String,
    pub encoding_idx: usize,
    pub mime_type: MimeType,
    pub payload_type: u8,
    pub spatial_layers: u8,
    pub ssrc: u32,
    pub temporal_layers: u8,
    pub use_dtx: bool,
    pub use_in_band_fec: bool,
    pub use_nack: bool,
    pub use_pli: bool,
    pub rid: Option<String>,
    // The next two fields describe the associated RTX (retransmission) stream.
    // The worker reports them as `rtxSsrc`/`rtxPayloadType`, so explicit renames
    // are required: the plain camelCase of `rtc_ssrc` would be `rtcSsrc`, which
    // never matches and would silently leave these fields `None`.
    #[serde(rename = "rtxSsrc")]
    pub rtc_ssrc: Option<u32>,
    #[serde(rename = "rtxPayloadType")]
    pub rtc_payload_type: Option<u8>,
}
/// An RTP stream (its parameters plus its current score) as reported by the worker.
#[derive(Debug, Clone, PartialOrd, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[doc(hidden)]
pub struct RtpStream {
    /// Parameters of this RTP stream.
    pub params: RtpStreamParams,
    /// Current score of this RTP stream.
    pub score: u8,
}
/// RTX (retransmission) parameters of a consumable RTP encoding.
#[derive(Debug, Clone, PartialOrd, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[doc(hidden)]
pub struct RtpRtxParameters {
    /// SSRC of the RTX stream, if any.
    pub ssrc: Option<u32>,
}
/// One encoding of the consumable RTP parameters, as reported in consumer dumps.
#[derive(Debug, Clone, PartialOrd, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[doc(hidden)]
pub struct ConsumableRtpEncoding {
    pub ssrc: Option<u32>,
    pub rid: Option<String>,
    pub codec_payload_type: Option<u8>,
    pub rtx: Option<RtpRtxParameters>,
    pub max_bitrate: Option<u32>,
    pub max_framerate: Option<f64>,
    pub dtx: Option<bool>,
    // Omitted from JSON entirely when no scalability mode is set.
    #[serde(default, skip_serializing_if = "ScalabilityMode::is_none")]
    pub scalability_mode: ScalabilityMode,
    pub spatial_layers: Option<u8>,
    pub temporal_layers: Option<u8>,
    pub ksvc: Option<bool>,
}
/// Full state dump of a consumer as returned by [`Consumer::dump`].
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[doc(hidden)]
#[non_exhaustive]
pub struct ConsumerDump {
    pub id: ConsumerId,
    pub kind: MediaKind,
    pub paused: bool,
    pub priority: u8,
    pub producer_id: ProducerId,
    pub producer_paused: bool,
    pub rtp_parameters: RtpParameters,
    pub supported_codec_payload_types: Vec<u8>,
    pub trace_event_types: String,
    pub r#type: ConsumerType,
    pub consumable_rtp_encodings: Vec<ConsumableRtpEncoding>,
    pub rtp_stream: RtpStream,
    // Layer fields below are signed because the worker may report -1-style
    // "no layer" values; they are absent for kinds/types without layers.
    pub preferred_spatial_layer: Option<i16>,
    pub target_spatial_layer: Option<i16>,
    pub current_spatial_layer: Option<i16>,
    pub preferred_temporal_layer: Option<i16>,
    pub target_temporal_layer: Option<i16>,
    pub current_temporal_layer: Option<i16>,
}
/// Type of a consumer; mirrors [`ProducerType`] with an extra `Pipe` variant.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum ConsumerType {
    /// A single RTP stream is consumed.
    Simple,
    /// Simulcast consumer.
    Simulcast,
    /// SVC consumer.
    Svc,
    /// Consumer on a pipe transport.
    Pipe,
}
impl From<ProducerType> for ConsumerType {
    /// A consumer inherits the type of the producer it consumes; note that
    /// this conversion never yields [`ConsumerType::Pipe`].
    fn from(producer_type: ProducerType) -> Self {
        match producer_type {
            ProducerType::Simple => Self::Simple,
            ProducerType::Simulcast => Self::Simulcast,
            ProducerType::Svc => Self::Svc,
        }
    }
}
/// RTC statistics of the consumer, as reported by the worker.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(missing_docs)]
#[non_exhaustive]
pub struct ConsumerStat {
    pub timestamp: u64,
    pub ssrc: u32,
    pub rtx_ssrc: Option<u32>,
    pub kind: MediaKind,
    pub mime_type: MimeType,
    pub packets_lost: u32,
    pub fraction_lost: u8,
    pub packets_discarded: usize,
    pub packets_retransmitted: usize,
    pub packets_repaired: usize,
    pub nack_count: usize,
    pub nack_packet_count: usize,
    pub pli_count: usize,
    pub fir_count: usize,
    pub score: u8,
    pub packet_count: usize,
    pub byte_count: usize,
    pub bitrate: u32,
    pub round_trip_time: Option<f32>,
}
/// Stats returned by [`Consumer::get_stats`]: either the consumer's stats alone,
/// or together with the stats of the associated producer (untagged JSON array).
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Deserialize, Serialize)]
#[serde(untagged)]
pub enum ConsumerStats {
    /// Only the consumer's own stats were reported.
    JustConsumer((ConsumerStat,)),
    /// Consumer stats plus the associated producer's stats.
    WithProducer((ConsumerStat, ProducerStat)),
}
impl ConsumerStats {
    /// Borrows the consumer's own stats, regardless of whether producer stats
    /// were reported alongside them.
    pub fn consumer_stats(&self) -> &ConsumerStat {
        match self {
            ConsumerStats::JustConsumer((stats,))
            | ConsumerStats::WithProducer((stats, _)) => stats,
        }
    }
}
/// Payload of a consumer trace event, tagged by `type` in JSON.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ConsumerTraceEventData {
    /// An RTP packet was handled.
    Rtp {
        timestamp: u64,
        direction: TraceEventDirection,
        info: RtpPacketTraceInfo,
    },
    /// A keyframe RTP packet was handled.
    KeyFrame {
        timestamp: u64,
        direction: TraceEventDirection,
        info: RtpPacketTraceInfo,
    },
    /// An RTCP NACK was handled (no extra info).
    Nack {
        timestamp: u64,
        direction: TraceEventDirection,
    },
    /// An RTCP PLI was handled.
    Pli {
        timestamp: u64,
        direction: TraceEventDirection,
        info: SsrcTraceInfo,
    },
    /// An RTCP FIR was handled.
    Fir {
        timestamp: u64,
        direction: TraceEventDirection,
        info: SsrcTraceInfo,
    },
}
/// Types of trace events that can be enabled via [`Consumer::enable_trace_event`].
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum ConsumerTraceEventType {
    /// RTP packet events.
    Rtp,
    /// Keyframe events.
    KeyFrame,
    /// RTCP NACK events.
    Nack,
    /// RTCP PLI events.
    Pli,
    /// RTCP FIR events.
    Fir,
}
/// Channel notifications the worker sends for this consumer
/// (tagged by `event`, payload under `data`).
#[derive(Debug, Deserialize)]
#[serde(tag = "event", rename_all = "lowercase", content = "data")]
enum Notification {
    // The associated producer was closed.
    ProducerClose,
    // The associated producer was paused.
    ProducerPause,
    // The associated producer was resumed.
    ProducerResume,
    // The consumer score changed.
    Score(ConsumerScore),
    // The active layers changed (`None` when no layer is active).
    LayersChange(Option<ConsumerLayers>),
    // A trace event fired (only for enabled trace event types).
    Trace(ConsumerTraceEventData),
}
/// Payload-channel notifications for this consumer; the binary payload
/// accompanying the message carries the actual RTP packet.
#[derive(Debug, Deserialize)]
#[serde(tag = "event", rename_all = "lowercase", content = "data")]
enum PayloadNotification {
    // An RTP packet is being delivered to this consumer.
    Rtp,
}
/// Registered event callbacks, one bag per event.
/// `Bag`s may fire repeatedly; `BagOnce`s fire at most once (close-like events).
#[derive(Default)]
#[allow(clippy::type_complexity)]
struct Handlers {
    // Fired with the raw payload of each RTP packet notification.
    rtp: Bag<Arc<dyn Fn(&[u8]) + Send + Sync>>,
    pause: Bag<Arc<dyn Fn() + Send + Sync>>,
    resume: Bag<Arc<dyn Fn() + Send + Sync>>,
    producer_pause: Bag<Arc<dyn Fn() + Send + Sync>>,
    producer_resume: Bag<Arc<dyn Fn() + Send + Sync>>,
    score: Bag<Arc<dyn Fn(&ConsumerScore) + Send + Sync>, ConsumerScore>,
    #[allow(clippy::type_complexity)]
    layers_change: Bag<Arc<dyn Fn(&Option<ConsumerLayers>) + Send + Sync>, Option<ConsumerLayers>>,
    trace: Bag<Arc<dyn Fn(&ConsumerTraceEventData) + Send + Sync>, ConsumerTraceEventData>,
    producer_close: BagOnce<Box<dyn FnOnce() + Send>>,
    transport_close: BagOnce<Box<dyn FnOnce() + Send>>,
    close: BagOnce<Box<dyn FnOnce() + Send>>,
}
/// Shared state behind `Consumer`; the last strong reference dropping triggers
/// `close(true)` via the `Drop` impl.
struct Inner {
    id: ConsumerId,
    producer_id: ProducerId,
    kind: MediaKind,
    r#type: ConsumerType,
    rtp_parameters: RtpParameters,
    // Locally-paused flag; shared with the notification callback.
    paused: Arc<Mutex<bool>>,
    executor: Arc<Executor<'static>>,
    channel: Channel,
    // Mirror of the associated producer's paused state.
    producer_paused: Arc<Mutex<bool>>,
    priority: Mutex<u8>,
    score: Arc<Mutex<ConsumerScore>>,
    preferred_layers: Mutex<Option<ConsumerLayers>>,
    current_layers: Arc<Mutex<Option<ConsumerLayers>>>,
    handlers: Arc<Handlers>,
    app_data: AppData,
    transport: Arc<dyn Transport>,
    // Weak so the consumer does not keep the producer alive; checked before
    // sending the close request to the worker.
    weak_producer: WeakProducer,
    closed: Arc<AtomicBool>,
    // Keeps channel/payload-channel subscriptions alive for our lifetime.
    _subscription_handlers: Mutex<Vec<Option<SubscriptionHandler>>>,
    // Keeps the transport-close callback registered for our lifetime.
    _on_transport_close_handler: Mutex<HandlerId>,
}
impl Drop for Inner {
    fn drop(&mut self) {
        debug!("drop()");
        // Dropping the last strong handle closes the consumer and also asks
        // the worker to close it (hence `close_request = true`).
        self.close(true);
    }
}
impl Inner {
    /// Closes the consumer exactly once; when `close_request` is true, also
    /// sends an async close request to the worker — but only if the associated
    /// producer is still alive.
    fn close(&self, close_request: bool) {
        // `swap` makes this idempotent: only the first caller runs close logic.
        if !self.closed.swap(true, Ordering::SeqCst) {
            debug!("close()");
            self.handlers.close.call_simple();
            if close_request {
                let channel = self.channel.clone();
                let transport_id = self.transport.id();
                let request = ConsumerCloseRequest {
                    consumer_id: self.id,
                };
                let weak_producer = self.weak_producer.clone();
                self.executor
                    .spawn(async move {
                        // Only bother the worker while the producer still
                        // exists; presumably the consumer is gone with it
                        // otherwise — confirm against worker semantics.
                        if weak_producer.upgrade().is_some() {
                            if let Err(error) = channel.request(transport_id, request).await {
                                error!("consumer closing failed on drop: {}", error);
                            }
                        }
                    })
                    .detach();
            }
        }
    }
}
/// A consumer created on a transport; cheap to clone (all clones share the same
/// inner state), and closed when the last clone is dropped.
#[derive(Clone)]
#[must_use = "Consumer will be closed on drop, make sure to keep it around for as long as needed"]
pub struct Consumer {
    inner: Arc<Inner>,
}
impl fmt::Debug for Consumer {
    /// Formats the consumer's observable state, omitting internal plumbing
    /// such as the channel, executor and subscription handlers.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let inner = &self.inner;
        let mut debug_struct = f.debug_struct("Consumer");
        debug_struct.field("id", &inner.id);
        debug_struct.field("producer_id", &inner.producer_id);
        debug_struct.field("kind", &inner.kind);
        debug_struct.field("type", &inner.r#type);
        debug_struct.field("rtp_parameters", &inner.rtp_parameters);
        debug_struct.field("paused", &inner.paused);
        debug_struct.field("producer_paused", &inner.producer_paused);
        debug_struct.field("priority", &inner.priority);
        debug_struct.field("score", &inner.score);
        debug_struct.field("preferred_layers", &inner.preferred_layers);
        debug_struct.field("current_layers", &inner.current_layers);
        debug_struct.field("transport", &inner.transport);
        debug_struct.field("closed", &inner.closed);
        debug_struct.finish()
    }
}
impl Consumer {
/// Creates a consumer: wires up worker notification handling, RTP payload
/// delivery and transport-close propagation, then assembles the shared state.
///
/// Only callable from the parent module (transports create consumers).
#[allow(clippy::too_many_arguments)]
pub(super) fn new(
    id: ConsumerId,
    producer: Producer,
    r#type: ConsumerType,
    rtp_parameters: RtpParameters,
    paused: bool,
    executor: Arc<Executor<'static>>,
    channel: Channel,
    payload_channel: &PayloadChannel,
    producer_paused: bool,
    score: ConsumerScore,
    preferred_layers: Option<ConsumerLayers>,
    app_data: AppData,
    transport: Arc<dyn Transport>,
) -> Self {
    debug!("new()");

    let handlers = Arc::<Handlers>::default();
    let score = Arc::new(Mutex::new(score));
    let closed = Arc::new(AtomicBool::new(false));
    #[allow(clippy::mutex_atomic)]
    let paused = Arc::new(Mutex::new(paused));
    #[allow(clippy::mutex_atomic)]
    let producer_paused = Arc::new(Mutex::new(producer_paused));
    let current_layers = Arc::<Mutex<Option<ConsumerLayers>>>::default();

    // Filled in at the end of the constructor; lets callbacks reach `Inner`
    // without keeping it alive (which would create a reference cycle).
    let inner_weak = Arc::<Mutex<Option<Weak<Inner>>>>::default();

    let subscription_handler = {
        let handlers = Arc::clone(&handlers);
        let closed = Arc::clone(&closed);
        let paused = Arc::clone(&paused);
        let producer_paused = Arc::clone(&producer_paused);
        let score = Arc::clone(&score);
        // Fixed: this line previously contained a mangled `¤t_layers`
        // token, which is not valid Rust; the intent is to clone the
        // `current_layers` handle for the notification callback.
        let current_layers = Arc::clone(&current_layers);
        let inner_weak = Arc::clone(&inner_weak);
        channel.subscribe_to_notifications(id.into(), move |notification| {
            match serde_json::from_slice::<Notification>(notification) {
                Ok(notification) => match notification {
                    Notification::ProducerClose => {
                        if !closed.load(Ordering::SeqCst) {
                            handlers.producer_close.call_simple();
                            let maybe_inner =
                                inner_weak.lock().as_ref().and_then(Weak::upgrade);
                            if let Some(inner) = maybe_inner {
                                // Close asynchronously; `close(false)` skips the
                                // worker request since the producer is gone.
                                inner
                                    .executor
                                    .clone()
                                    .spawn(async move {
                                        inner.close(false);
                                    })
                                    .detach();
                            }
                        }
                    }
                    Notification::ProducerPause => {
                        let mut producer_paused = producer_paused.lock();
                        let was_paused = *paused.lock() || *producer_paused;
                        *producer_paused = true;
                        handlers.producer_pause.call_simple();
                        // Fire `pause` only on the effective unpaused->paused edge.
                        if !was_paused {
                            handlers.pause.call_simple();
                        }
                    }
                    Notification::ProducerResume => {
                        let mut producer_paused = producer_paused.lock();
                        let paused = *paused.lock();
                        let was_paused = paused || *producer_paused;
                        *producer_paused = false;
                        handlers.producer_resume.call_simple();
                        // Fire `resume` only if this transition actually
                        // unpauses the consumer (it is not locally paused).
                        if was_paused && !paused {
                            handlers.resume.call_simple();
                        }
                    }
                    Notification::Score(consumer_score) => {
                        *score.lock() = consumer_score.clone();
                        handlers.score.call_simple(&consumer_score);
                    }
                    Notification::LayersChange(consumer_layers) => {
                        *current_layers.lock() = consumer_layers;
                        handlers.layers_change.call_simple(&consumer_layers);
                    }
                    Notification::Trace(trace_event_data) => {
                        handlers.trace.call_simple(&trace_event_data);
                    }
                },
                Err(error) => {
                    error!("Failed to parse notification: {}", error);
                }
            }
        })
    };

    let payload_subscription_handler = {
        let handlers = Arc::clone(&handlers);
        payload_channel.subscribe_to_notifications(id.into(), move |message, payload| {
            match serde_json::from_slice::<PayloadNotification>(message) {
                Ok(notification) => match notification {
                    PayloadNotification::Rtp => {
                        handlers.rtp.call(|callback| {
                            callback(payload);
                        });
                    }
                },
                Err(error) => {
                    error!("Failed to parse payload notification: {}", error);
                }
            }
        })
    };

    let on_transport_close_handler = transport.on_close({
        let inner_weak = Arc::clone(&inner_weak);
        Box::new(move || {
            let maybe_inner = inner_weak.lock().as_ref().and_then(Weak::upgrade);
            if let Some(inner) = maybe_inner {
                inner.handlers.transport_close.call_simple();
                inner.close(false);
            }
        })
    });

    let inner = Arc::new(Inner {
        id,
        producer_id: producer.id(),
        kind: producer.kind(),
        r#type,
        rtp_parameters,
        paused,
        producer_paused,
        priority: Mutex::new(1_u8),
        score,
        preferred_layers: Mutex::new(preferred_layers),
        current_layers,
        executor,
        channel,
        handlers,
        app_data,
        transport,
        weak_producer: producer.downgrade(),
        closed,
        _subscription_handlers: Mutex::new(vec![
            subscription_handler,
            payload_subscription_handler,
        ]),
        _on_transport_close_handler: Mutex::new(on_transport_close_handler),
    });

    inner_weak.lock().replace(Arc::downgrade(&inner));

    Self { inner }
}
/// Consumer id.
#[must_use]
pub fn id(&self) -> ConsumerId {
    self.inner.id
}

/// Id of the associated producer.
#[must_use]
pub fn producer_id(&self) -> ProducerId {
    self.inner.producer_id
}

/// Transport this consumer belongs to.
// `#[must_use]` added for consistency with the other getters.
#[must_use]
pub fn transport(&self) -> &Arc<dyn Transport> {
    &self.inner.transport
}

/// Media kind.
#[must_use]
pub fn kind(&self) -> MediaKind {
    self.inner.kind
}

/// RTP parameters of this consumer.
#[must_use]
pub fn rtp_parameters(&self) -> &RtpParameters {
    &self.inner.rtp_parameters
}

/// Consumer type.
#[must_use]
pub fn r#type(&self) -> ConsumerType {
    self.inner.r#type
}

/// Whether the consumer is locally paused.
#[must_use]
pub fn paused(&self) -> bool {
    *self.inner.paused.lock()
}

/// Whether the associated producer is paused.
#[must_use]
pub fn producer_paused(&self) -> bool {
    *self.inner.producer_paused.lock()
}

/// Current priority (1 by default).
#[must_use]
pub fn priority(&self) -> u8 {
    *self.inner.priority.lock()
}

/// Latest consumer score reported by the worker.
#[must_use]
pub fn score(&self) -> ConsumerScore {
    self.inner.score.lock().clone()
}

/// Preferred spatial/temporal layers, if set.
#[must_use]
pub fn preferred_layers(&self) -> Option<ConsumerLayers> {
    *self.inner.preferred_layers.lock()
}

/// Currently active spatial/temporal layers, if any.
#[must_use]
pub fn current_layers(&self) -> Option<ConsumerLayers> {
    *self.inner.current_layers.lock()
}

/// Custom application data attached to the consumer.
#[must_use]
pub fn app_data(&self) -> &AppData {
    &self.inner.app_data
}

/// Whether the consumer is closed.
#[must_use]
pub fn closed(&self) -> bool {
    self.inner.closed.load(Ordering::SeqCst)
}
/// Requests a full state dump of this consumer from the worker.
#[doc(hidden)]
pub async fn dump(&self) -> Result<ConsumerDump, RequestError> {
    debug!("dump()");
    self.inner
        .channel
        .request(self.id(), ConsumerDumpRequest {})
        .await
}
/// Fetches current RTC statistics for this consumer from the worker.
pub async fn get_stats(&self) -> Result<ConsumerStats, RequestError> {
    debug!("get_stats()");
    self.inner
        .channel
        .request(self.id(), ConsumerGetStatsRequest {})
        .await
}
/// Asks the worker to pause the consumer, then updates local state and fires
/// the `pause` event — but only if the consumer was not already effectively
/// paused (locally or via its producer).
pub async fn pause(&self) -> Result<(), RequestError> {
    debug!("pause()");
    self.inner
        .channel
        .request(self.id(), ConsumerPauseRequest {})
        .await?;
    // Lock order: `paused` first, then `producer_paused` (matches `resume`).
    let mut paused = self.inner.paused.lock();
    let was_paused = *paused || *self.inner.producer_paused.lock();
    *paused = true;
    if !was_paused {
        self.inner.handlers.pause.call_simple();
    }
    Ok(())
}
/// Asks the worker to resume the consumer, then updates local state and fires
/// the `resume` event if the consumer had been effectively paused (locally or
/// via its producer) before this call.
pub async fn resume(&self) -> Result<(), RequestError> {
    debug!("resume()");
    self.inner
        .channel
        .request(self.id(), ConsumerResumeRequest {})
        .await?;
    // Lock order: `paused` first, then `producer_paused` (matches `pause`).
    let mut paused = self.inner.paused.lock();
    let was_paused = *paused || *self.inner.producer_paused.lock();
    *paused = false;
    if was_paused {
        self.inner.handlers.resume.call_simple();
    }
    Ok(())
}
/// Tells the worker which spatial/temporal layers to prefer, then stores the
/// layers the worker reports back as the new preferred layers.
pub async fn set_preferred_layers(
    &self,
    consumer_layers: ConsumerLayers,
) -> Result<(), RequestError> {
    debug!("set_preferred_layers()");
    let request = ConsumerSetPreferredLayersRequest {
        data: consumer_layers,
    };
    let response = self.inner.channel.request(self.id(), request).await?;
    *self.inner.preferred_layers.lock() = response;
    Ok(())
}
/// Sets the consumer priority on the worker and stores the priority value the
/// worker confirms in the response.
pub async fn set_priority(&self, priority: u8) -> Result<(), RequestError> {
    // Fixed: this previously logged "set_preferred_layers()" (copy-paste bug).
    debug!("set_priority()");
    let result = self
        .inner
        .channel
        .request(self.id(), ConsumerSetPriorityRequest { priority })
        .await?;
    *self.inner.priority.lock() = result.priority;
    Ok(())
}
/// Resets the consumer priority back to its default value of `1` and stores
/// the priority value the worker confirms in the response.
pub async fn unset_priority(&self) -> Result<(), RequestError> {
    debug!("unset_priority()");
    let result = self
        .inner
        .channel
        .request(self.id(), ConsumerSetPriorityRequest { priority: 1 })
        .await?;
    *self.inner.priority.lock() = result.priority;
    Ok(())
}
/// Asks the worker to request a key frame from the producing endpoint.
pub async fn request_key_frame(&self) -> Result<(), RequestError> {
    debug!("request_key_frame()");
    self.inner
        .channel
        .request(self.id(), ConsumerRequestKeyFrameRequest {})
        .await
}
/// Enables emission of the given trace event types; subsequent events are
/// delivered to callbacks registered via [`Consumer::on_trace`].
pub async fn enable_trace_event(
    &self,
    types: Vec<ConsumerTraceEventType>,
) -> Result<(), RequestError> {
    debug!("enable_trace_event()");
    self.inner
        .channel
        .request(self.id(), ConsumerEnableTraceEventRequest { types })
        .await
}
/// Registers a callback invoked with the raw payload of every RTP packet
/// delivered over the payload channel; dropping the returned [`HandlerId`]
/// unregisters it (same for all `on_*` methods below).
pub fn on_rtp<F: Fn(&[u8]) + Send + Sync + 'static>(&self, callback: F) -> HandlerId {
    self.inner.handlers.rtp.add(Arc::new(callback))
}

/// Registers a callback invoked when the consumer becomes effectively paused.
pub fn on_pause<F: Fn() + Send + Sync + 'static>(&self, callback: F) -> HandlerId {
    self.inner.handlers.pause.add(Arc::new(callback))
}

/// Registers a callback invoked when the consumer becomes effectively resumed.
pub fn on_resume<F: Fn() + Send + Sync + 'static>(&self, callback: F) -> HandlerId {
    self.inner.handlers.resume.add(Arc::new(callback))
}

/// Registers a callback invoked when the associated producer is paused.
pub fn on_producer_pause<F: Fn() + Send + Sync + 'static>(&self, callback: F) -> HandlerId {
    self.inner.handlers.producer_pause.add(Arc::new(callback))
}

/// Registers a callback invoked when the associated producer is resumed.
pub fn on_producer_resume<F: Fn() + Send + Sync + 'static>(&self, callback: F) -> HandlerId {
    self.inner.handlers.producer_resume.add(Arc::new(callback))
}

/// Registers a callback invoked whenever the consumer score changes.
pub fn on_score<F: Fn(&ConsumerScore) + Send + Sync + 'static>(
    &self,
    callback: F,
) -> HandlerId {
    self.inner.handlers.score.add(Arc::new(callback))
}

/// Registers a callback invoked whenever the active layers change.
pub fn on_layers_change<F: Fn(&Option<ConsumerLayers>) + Send + Sync + 'static>(
    &self,
    callback: F,
) -> HandlerId {
    self.inner.handlers.layers_change.add(Arc::new(callback))
}

/// Registers a callback invoked for every enabled trace event.
pub fn on_trace<F: Fn(&ConsumerTraceEventData) + Send + Sync + 'static>(
    &self,
    callback: F,
) -> HandlerId {
    self.inner.handlers.trace.add(Arc::new(callback))
}

/// Registers a callback invoked when the associated producer is closed.
pub fn on_producer_close<F: FnOnce() + Send + 'static>(&self, callback: F) -> HandlerId {
    self.inner.handlers.producer_close.add(Box::new(callback))
}

/// Registers a callback invoked when the transport is closed.
pub fn on_transport_close<F: FnOnce() + Send + 'static>(&self, callback: F) -> HandlerId {
    self.inner.handlers.transport_close.add(Box::new(callback))
}

/// Registers a callback invoked when the consumer is closed; if the consumer
/// is already closed, pending close callbacks are invoked immediately.
pub fn on_close<F: FnOnce() + Send + 'static>(&self, callback: F) -> HandlerId {
    let handler_id = self.inner.handlers.close.add(Box::new(callback));
    if self.inner.closed.load(Ordering::Relaxed) {
        self.inner.handlers.close.call_simple();
    }
    handler_id
}
/// Downgrades to a [`WeakConsumer`] that does not keep the consumer alive.
#[must_use]
pub fn downgrade(&self) -> WeakConsumer {
    WeakConsumer {
        inner: Arc::downgrade(&self.inner),
    }
}
}
/// Weak handle to a [`Consumer`]; does not keep the consumer alive and can be
/// upgraded back to a strong handle while the consumer still exists.
#[derive(Clone)]
pub struct WeakConsumer {
    inner: Weak<Inner>,
}
impl fmt::Debug for WeakConsumer {
    // Intentionally fieldless: the inner state may already be gone.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("WeakConsumer").finish()
    }
}
impl WeakConsumer {
    /// Attempts to turn this weak handle back into a strong [`Consumer`];
    /// returns `None` if the consumer has already been dropped.
    #[must_use]
    pub fn upgrade(&self) -> Option<Consumer> {
        self.inner.upgrade().map(|inner| Consumer { inner })
    }
}