use glib::{prelude::*, translate::*};
use std::{alloc, any::TypeId, mem, ptr};
use crate::{Memory, ffi};
/// Errors that can occur when extracting the wrapped value from a [`Memory`]
/// via [`Memory::try_into_inner`].
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
pub enum MemoryIntoInnerError {
    /// The memory was not allocated by the Rust allocator, so it carries no
    /// `WrappedMemory` header to extract from.
    #[error("Memory does not use the Rust allocator (uses {actual_allocator:?})")]
    WrongAllocator { actual_allocator: Option<String> },
    /// The memory is not writable, i.e. not exclusively owned, so the wrapped
    /// value cannot be moved out safely.
    #[error("Memory is not writable")]
    NotWritable,
    /// The memory is a sub-memory (share) that only borrows its parent's data
    /// and owns no wrapped value of its own.
    #[error("Cannot extract wrapped value from sub-memory (shared memory)")]
    SubMemory,
    /// The memory wraps a value of a different type than the one requested.
    #[error("Memory does not wrap the requested type (expected {expected:?}, found {actual:?})")]
    TypeMismatch { expected: TypeId, actual: TypeId },
    /// The buffer contained more than one memory block.
    #[error("Buffer must contain exactly one memory block")]
    MultipleMemoryBlocks,
}
// Header placed at the start of every allocation made by the Rust allocator.
//
// `#[repr(C)]` with `mem` as the first field guarantees that a
// `*mut WrappedMemory<T>` can be cast to/from `*mut ffi::GstMemory`.
#[repr(C)]
struct WrappedMemory<T> {
    // Embedded GStreamer memory; must stay the first field (see cast note).
    mem: ffi::GstMemory,
    // Start of the usable data region (trailing in the same allocation for
    // plain allocations, or inside the wrapped value for wrapped memories).
    data: *mut u8,
    // Layout used for this allocation, stored so `free` can deallocate.
    layout: alloc::Layout,
    // Byte offset of `wrap` from the start of the struct, so `free` can
    // locate the wrapped value without knowing `T`.
    wrap_offset: usize,
    // Type-erased destructor for `wrap`; `None` when `T` needs no drop, for
    // sub-memories, or after the value was moved out.
    wrap_drop_in_place: Option<unsafe fn(*mut ())>,
    // `TypeId` of `T`, checked by `try_into_from_memory_ptr`.
    wrap_type_id: TypeId,
    // The wrapped value itself (zero-sized `()` for plain allocations).
    wrap: T,
}
// vfunc: allocates `size` bytes (plus prefix/padding) together with the
// `WrappedMemory<()>` header in a single block from the Rust global allocator.
// Returns NULL on overflow, invalid layout, or allocation failure.
unsafe extern "C" fn alloc(
    allocator: *mut ffi::GstAllocator,
    size: usize,
    params: *mut ffi::GstAllocationParams,
) -> *mut ffi::GstMemory {
    unsafe {
        let params = &*params;

        // Total data size; fail the allocation on overflow.
        let Some(maxsize) = size
            .checked_add(params.prefix)
            .and_then(|s| s.checked_add(params.padding))
        else {
            return ptr::null_mut();
        };

        // GStreamer stores alignments as `alignment - 1` bitmasks, so OR-ing
        // the masks selects the stricter of the requested and default
        // alignment; `align + 1` converts back to a power of two below.
        let align = params.align | crate::Memory::default_alignment();

        let layout_base = alloc::Layout::new::<WrappedMemory<()>>();
        let layout_data = match alloc::Layout::from_size_align(maxsize, align + 1) {
            Ok(res) => res,
            Err(err) => {
                crate::warning!(
                    crate::CAT_RUST,
                    "Invalid size {maxsize} or alignment {align}: {err}"
                );
                return ptr::null_mut();
            }
        };

        // Place the data region after the header, honouring its alignment.
        let (layout, data_offset) = match layout_base.extend(layout_data) {
            Ok(res) => res,
            Err(err) => {
                crate::warning!(
                    crate::CAT_RUST,
                    "Can't extend base memory layout to {maxsize} or alignment {align}: {err}"
                );
                return ptr::null_mut();
            }
        };
        let layout = layout.pad_to_align();

        let mem = alloc::alloc(layout);
        // The global allocator signals failure with a null pointer; report it
        // to GStreamer as a failed allocation instead of dereferencing it.
        if mem.is_null() {
            return ptr::null_mut();
        }
        let data = mem.add(data_offset);

        // Zero the prefix/padding regions if requested by the params.
        if params.prefix > 0 && (params.flags & ffi::GST_MEMORY_FLAG_ZERO_PREFIXED) != 0 {
            ptr::write_bytes(data, 0, params.prefix);
        }
        if (params.flags & ffi::GST_MEMORY_FLAG_ZERO_PADDED) != 0 {
            ptr::write_bytes(data.add(params.prefix).add(size), 0, params.padding);
        }

        let mem = mem as *mut WrappedMemory<()>;
        ffi::gst_memory_init(
            ptr::addr_of_mut!((*mem).mem),
            params.flags,
            allocator,
            ptr::null_mut(),
            maxsize,
            params.align,
            params.prefix,
            size,
        );
        // Initialize the remaining header fields; a plain allocation carries
        // no wrapped value, so the drop function stays `None`.
        ptr::write(ptr::addr_of_mut!((*mem).data), data);
        ptr::write(ptr::addr_of_mut!((*mem).layout), layout);
        ptr::write(ptr::addr_of_mut!((*mem).wrap_type_id), TypeId::of::<()>());
        ptr::write(ptr::addr_of_mut!((*mem).wrap_offset), 0);
        ptr::write(ptr::addr_of_mut!((*mem).wrap_drop_in_place), None);

        mem as *mut ffi::GstMemory
    }
}
// vfunc: frees a memory block previously produced by `alloc`, `mem_share`,
// or `Memory::from_slice`/`from_mut_slice`.
unsafe extern "C" fn free(_allocator: *mut ffi::GstAllocator, mem: *mut ffi::GstMemory) {
    unsafe {
        // Only called once the refcount has dropped to zero.
        debug_assert_eq!((*mem).mini_object.refcount, 0);
        let mem = mem as *mut WrappedMemory<()>;
        // Drop the wrapped value, if any. Sub-memories and plain allocations
        // store `None` here, and `try_into_from_memory_ptr` clears it after
        // moving the value out — either way no double drop can happen.
        if let Some(wrap_drop_in_place) = (*mem).wrap_drop_in_place {
            let wrap = (mem as *mut u8).add((*mem).wrap_offset) as *mut ();
            wrap_drop_in_place(wrap);
        }
        // `layout` was stored at allocation time and covers the whole block
        // (header plus any trailing data region).
        alloc::dealloc(mem as *mut u8, (*mem).layout);
    }
}
// vfunc: maps the memory for reading/writing.
//
// Mapping is trivial here: the stored data pointer stays valid for the whole
// lifetime of the memory, so it is handed out directly regardless of flags.
unsafe extern "C" fn mem_map(
    mem: *mut ffi::GstMemory,
    _maxsize: usize,
    _flags: ffi::GstMapFlags,
) -> glib::ffi::gpointer {
    unsafe { (*mem.cast::<WrappedMemory<()>>()).data as glib::ffi::gpointer }
}
// vfunc: unmapping is a no-op — the pointer handed out by `mem_map` remains
// valid for the memory's whole lifetime, so there is nothing to release.
unsafe extern "C" fn mem_unmap(_mem: *mut ffi::GstMemory) {}
unsafe extern "C" fn mem_share(
mem: *mut ffi::GstMemory,
offset: isize,
size: isize,
) -> *mut ffi::GstMemory {
unsafe {
let mem = mem as *mut WrappedMemory<()>;
let parent = if (*mem).mem.parent.is_null() {
mem
} else {
(*mem).mem.parent as *mut WrappedMemory<()>
};
let offset = offset as usize;
let mut size = size as usize;
let new_offset = (*mem).mem.offset.wrapping_add(offset);
debug_assert!(new_offset < (*mem).mem.maxsize);
if size == usize::MAX {
size = (*mem).mem.size.wrapping_sub(offset);
}
debug_assert!(new_offset <= usize::MAX - size);
debug_assert!(new_offset + size <= (*mem).mem.maxsize);
let layout = alloc::Layout::new::<WrappedMemory<()>>();
let sub = alloc::alloc(layout) as *mut WrappedMemory<()>;
ffi::gst_memory_init(
sub as *mut ffi::GstMemory,
(*mem).mem.mini_object.flags | ffi::GST_MINI_OBJECT_FLAG_LOCK_READONLY,
(*mem).mem.allocator,
parent as *mut ffi::GstMemory,
(*mem).mem.maxsize,
(*mem).mem.align,
new_offset,
size,
);
ptr::write(ptr::addr_of_mut!((*sub).data), (*mem).data);
ptr::write(ptr::addr_of_mut!((*sub).layout), layout);
ptr::write(ptr::addr_of_mut!((*sub).wrap_offset), 0);
ptr::write(ptr::addr_of_mut!((*sub).wrap_drop_in_place), None);
sub as *mut ffi::GstMemory
}
}
// vfunc: reports whether `mem1` is immediately followed by `mem2` inside
// their common parent and, if requested, stores `mem1`'s offset relative to
// that parent in `offset`.
//
// NOTE(review): `parent1` is dereferenced unconditionally; this relies on
// GStreamer only invoking the vfunc for memories that share a non-NULL
// parent — confirm against `gst_memory_is_span()`.
unsafe extern "C" fn mem_is_span(
    mem1: *mut ffi::GstMemory,
    mem2: *mut ffi::GstMemory,
    offset: *mut usize,
) -> glib::ffi::gboolean {
    unsafe {
        let mem1 = mem1 as *mut WrappedMemory<()>;
        let mem2 = mem2 as *mut WrappedMemory<()>;
        let parent1 = (*mem1).mem.parent as *mut WrappedMemory<()>;
        let parent2 = (*mem2).mem.parent as *mut WrappedMemory<()>;
        debug_assert_eq!(parent1, parent2);
        if !offset.is_null() {
            *offset = (*mem1).mem.offset.wrapping_sub((*parent1).mem.offset);
        }
        // Contiguous iff `mem1` ends exactly where `mem2` begins.
        let is_span = ((*mem1).mem.offset + ((*mem1).mem.size)) == (*mem2).mem.offset;
        is_span.into_glib()
    }
}
// GObject class initializer: installs the allocator's alloc/free vfuncs.
unsafe extern "C" fn class_init(class: glib::ffi::gpointer, _class_data: glib::ffi::gpointer) {
    unsafe {
        let klass = class.cast::<ffi::GstAllocatorClass>();
        (*klass).alloc = Some(alloc);
        (*klass).free = Some(free);
    }
}
// GObject instance initializer: installs the per-memory vfuncs and marks the
// singleton allocator as intentionally leaked.
unsafe extern "C" fn instance_init(
    obj: *mut glib::gobject_ffi::GTypeInstance,
    _class: glib::ffi::gpointer,
) {
    unsafe {
        // NUL-terminated memory type name as required by GStreamer.
        static ALLOCATOR_TYPE: &[u8] = b"RustGlobalAllocatorMemory\0";

        let allocator = obj.cast::<ffi::GstAllocator>();
        (*allocator).mem_type = ALLOCATOR_TYPE.as_ptr().cast();
        (*allocator).mem_map = Some(mem_map);
        (*allocator).mem_unmap = Some(mem_unmap);
        (*allocator).mem_share = Some(mem_share);
        (*allocator).mem_is_span = Some(mem_is_span);
        // The allocator instance lives for the whole process.
        (*allocator).object.flags |= ffi::GST_OBJECT_FLAG_MAY_BE_LEAKED;
    }
}
/// Returns the singleton allocator that allocates memory via the Rust global
/// allocator.
///
/// Asserts that GStreamer was initialized on the main thread first.
pub fn rust_allocator() -> &'static crate::Allocator {
    assert_initialized_main_thread!();
    rust_allocator_internal()
}
// Lazily registers the allocator GType and instantiates the singleton.
//
// Unlike `rust_allocator()` this does not assert GStreamer initialization,
// so it can be used internally while constructing memories.
fn rust_allocator_internal() -> &'static crate::Allocator {
    static RUST_ALLOCATOR: std::sync::OnceLock<crate::Allocator> = std::sync::OnceLock::new();
    RUST_ALLOCATOR.get_or_init(|| unsafe {
        // `GTypeInfo` holds raw pointers and is not `Send`/`Sync` on its own;
        // the wrapper asserts that this read-only static use is safe.
        struct TypeInfoWrap(glib::gobject_ffi::GTypeInfo);
        unsafe impl Send for TypeInfoWrap {}
        unsafe impl Sync for TypeInfoWrap {}
        static TYPE_INFO: TypeInfoWrap = TypeInfoWrap(glib::gobject_ffi::GTypeInfo {
            class_size: mem::size_of::<ffi::GstAllocatorClass>() as u16,
            base_init: None,
            base_finalize: None,
            class_init: Some(class_init),
            class_finalize: None,
            class_data: ptr::null_mut(),
            instance_size: mem::size_of::<ffi::GstAllocator>() as u16,
            n_preallocs: 0,
            instance_init: Some(instance_init),
            value_table: ptr::null(),
        });
        // Probe for a type name that is not registered yet.
        // NOTE(review): presumably this handles several copies of this crate
        // being linked into one process, each registering its own allocator
        // type — confirm.
        let type_name = {
            let mut idx = 0;
            loop {
                let type_name = glib::gformat!("GstRsAllocator-{}", idx);
                if glib::gobject_ffi::g_type_from_name(type_name.as_ptr())
                    == glib::gobject_ffi::G_TYPE_INVALID
                {
                    break type_name;
                }
                idx += 1;
            }
        };
        let t = glib::gobject_ffi::g_type_register_static(
            crate::Allocator::static_type().into_glib(),
            type_name.as_ptr(),
            &TYPE_INFO.0,
            0,
        );
        assert!(t != glib::gobject_ffi::G_TYPE_INVALID);
        from_glib_none(
            glib::gobject_ffi::g_object_newv(t, 0, ptr::null_mut()) as *mut ffi::GstAllocator
        )
    })
}
// Attempts to move the wrapped value of type `T` out of `mem_ptr`.
//
// On success the header's drop function is cleared so that `free` will not
// drop the (now moved-out) value a second time; the caller must still unref
// the memory to release the allocation itself.
#[inline]
pub(crate) unsafe fn try_into_from_memory_ptr<T: 'static>(
    mem_ptr: *mut ffi::GstMemory,
) -> Result<T, MemoryIntoInnerError> {
    skip_assert_initialized!();
    unsafe {
        // Only memories created by the Rust allocator carry a
        // `WrappedMemory<T>` header in front of the `GstMemory`.
        if (*mem_ptr).allocator.is_null()
            || (*mem_ptr).allocator != rust_allocator_internal().as_ptr()
        {
            // Report the actual allocator's GType name for diagnostics.
            let actual_allocator = if (*mem_ptr).allocator.is_null() {
                None
            } else {
                Some(
                    std::ffi::CStr::from_ptr(glib::gobject_ffi::g_type_name_from_instance(
                        (*mem_ptr).allocator as *mut glib::gobject_ffi::GTypeInstance,
                    ))
                    .to_string_lossy()
                    .to_string(),
                )
            };
            return Err(MemoryIntoInnerError::WrongAllocator { actual_allocator });
        }
        // Writability implies exclusive ownership of the mini-object, which
        // is required before moving the wrapped value out.
        if ffi::gst_mini_object_is_writable(mem_ptr as *mut ffi::GstMiniObject) == glib::ffi::GFALSE
        {
            return Err(MemoryIntoInnerError::NotWritable);
        }
        // Sub-memories only borrow the parent's data and own no value.
        if !(*mem_ptr).parent.is_null() {
            return Err(MemoryIntoInnerError::SubMemory);
        }
        let mem_wrapper = &*(mem_ptr as *mut WrappedMemory<T>);
        if mem_wrapper.wrap_type_id != TypeId::of::<T>() {
            return Err(MemoryIntoInnerError::TypeMismatch {
                expected: std::any::TypeId::of::<T>(),
                actual: mem_wrapper.wrap_type_id,
            });
        }
        let mem_wrapper_mut = &mut *(mem_ptr as *mut WrappedMemory<T>);
        // Move the value out bitwise, then disarm the destructor so `free`
        // does not run it on the moved-out bytes.
        let value = ptr::read(&mem_wrapper_mut.wrap);
        mem_wrapper_mut.wrap_drop_in_place = None;
        Ok(value)
    }
}
impl Memory {
    /// Creates a new memory block wrapping `slice` without copying it.
    ///
    /// The resulting memory is read-only. The wrapped value is dropped
    /// together with the memory and can be recovered again with
    /// [`Memory::try_into_inner`].
    #[doc(alias = "gst_memory_new_wrapped")]
    #[doc(alias = "gst_memory_new_wrapped_full")]
    #[inline]
    pub fn from_slice<T: AsRef<[u8]> + Send + 'static>(slice: T) -> Self {
        assert_initialized_main_thread!();
        let len = slice.as_ref().len();
        unsafe {
            let layout = alloc::Layout::new::<WrappedMemory<T>>();
            let mem = alloc::alloc(layout) as *mut WrappedMemory<T>;
            // A null pointer means the global allocator failed; writing
            // through it below would be UB, so abort via the standard OOM
            // handler instead.
            if mem.is_null() {
                alloc::handle_alloc_error(layout);
            }
            ffi::gst_memory_init(
                mem as *mut ffi::GstMemory,
                ffi::GST_MINI_OBJECT_FLAG_LOCK_READONLY,
                rust_allocator_internal().to_glib_none().0,
                ptr::null_mut(),
                len,
                0,
                0,
                len,
            );
            ptr::write(ptr::addr_of_mut!((*mem).wrap), slice);
            // The slice length must not change once the value is stored.
            assert_eq!(len, (*mem).wrap.as_ref().len());
            // `data` points into the wrapped value itself.
            let data = (*mem).wrap.as_ref().as_ptr();
            ptr::write(ptr::addr_of_mut!((*mem).data), mut_override(data));
            ptr::write(ptr::addr_of_mut!((*mem).layout), layout);
            // Record where `wrap` lives so `free` can drop it type-erased.
            let wrap_offset = ptr::addr_of!((*mem).wrap) as usize - mem as usize;
            ptr::write(ptr::addr_of_mut!((*mem).wrap_offset), wrap_offset);
            // Only store a destructor if `T` actually needs dropping.
            ptr::write(
                ptr::addr_of_mut!((*mem).wrap_drop_in_place),
                if mem::needs_drop::<T>() {
                    Some(|ptr| ptr::drop_in_place::<T>(ptr as *mut T))
                } else {
                    None
                },
            );
            ptr::write(ptr::addr_of_mut!((*mem).wrap_type_id), TypeId::of::<T>());
            from_glib_full(mem as *mut ffi::GstMemory)
        }
    }

    /// Creates a new writable memory block wrapping `slice` without copying.
    ///
    /// The wrapped value is dropped together with the memory and can be
    /// recovered again with [`Memory::try_into_inner`].
    #[doc(alias = "gst_memory_new_wrapped")]
    #[doc(alias = "gst_memory_new_wrapped_full")]
    #[inline]
    pub fn from_mut_slice<T: AsMut<[u8]> + Send + 'static>(mut slice: T) -> Self {
        assert_initialized_main_thread!();
        let len = slice.as_mut().len();
        unsafe {
            let layout = alloc::Layout::new::<WrappedMemory<T>>();
            let mem = alloc::alloc(layout) as *mut WrappedMemory<T>;
            // See `from_slice`: abort on allocation failure rather than
            // writing through a null pointer.
            if mem.is_null() {
                alloc::handle_alloc_error(layout);
            }
            ffi::gst_memory_init(
                mem as *mut ffi::GstMemory,
                0,
                rust_allocator_internal().to_glib_none().0,
                ptr::null_mut(),
                len,
                0,
                0,
                len,
            );
            ptr::write(ptr::addr_of_mut!((*mem).wrap), slice);
            // The slice length must not change once the value is stored.
            assert_eq!(len, (*mem).wrap.as_mut().len());
            let data = (*mem).wrap.as_mut().as_mut_ptr();
            ptr::write(ptr::addr_of_mut!((*mem).data), data);
            ptr::write(ptr::addr_of_mut!((*mem).layout), layout);
            let wrap_offset = ptr::addr_of!((*mem).wrap) as usize - mem as usize;
            ptr::write(ptr::addr_of_mut!((*mem).wrap_offset), wrap_offset);
            ptr::write(
                ptr::addr_of_mut!((*mem).wrap_drop_in_place),
                if mem::needs_drop::<T>() {
                    Some(|ptr| ptr::drop_in_place::<T>(ptr as *mut T))
                } else {
                    None
                },
            );
            ptr::write(ptr::addr_of_mut!((*mem).wrap_type_id), TypeId::of::<T>());
            from_glib_full(mem as *mut ffi::GstMemory)
        }
    }

    /// Consumes the memory and returns the wrapped value of type `T`.
    ///
    /// On failure the untouched memory is returned together with the reason.
    #[inline]
    pub fn try_into_inner<T: 'static>(self) -> Result<T, (Self, MemoryIntoInnerError)> {
        unsafe {
            let mem_ptr = self.as_mut_ptr();
            match try_into_from_memory_ptr(mem_ptr) {
                // The value was moved out and the drop function disarmed, so
                // dropping `self` only releases the allocation.
                Ok(value) => {
                    drop(self);
                    Ok(value)
                }
                Err(err) => Err((self, err)),
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_alloc() {
        use crate::prelude::AllocatorExt;
        crate::init().unwrap();

        let data = [0, 1, 2, 3, 4, 5, 6, 7];

        // Plain allocation: data must be default-aligned.
        let mut mem = rust_allocator().alloc(data.len(), None).unwrap();
        assert_eq!(mem.size(), data.len());
        assert_eq!(mem.allocator().unwrap(), rust_allocator());
        {
            let mem = mem.get_mut().unwrap();
            let mut map = mem.map_writable().unwrap();
            assert_eq!(
                map.as_ptr() as usize & crate::Memory::default_alignment(),
                0
            );
            map.copy_from_slice(&data);
        }
        let copy = mem.copy();
        assert_eq!(copy.size(), data.len());
        assert_eq!(copy.allocator().unwrap(), rust_allocator());
        {
            let map = copy.map_readable().unwrap();
            // Check the alignment of the mapped data pointer (not the
            // `GstMemory` struct pointer), matching the check above.
            assert_eq!(
                map.as_ptr() as usize & crate::Memory::default_alignment(),
                0
            );
            assert_eq!(map.as_slice(), &data);
        }

        // Allocation with a 1024-byte alignment request (mask 1023).
        let mut mem = rust_allocator()
            .alloc(
                data.len(),
                Some(&crate::AllocationParams::new(
                    crate::MemoryFlags::empty(),
                    1023,
                    0,
                    0,
                )),
            )
            .unwrap();
        assert_eq!(mem.size(), data.len());
        assert_eq!(mem.maxsize(), data.len());
        assert_eq!(mem.allocator().unwrap(), rust_allocator());
        {
            let mem = mem.get_mut().unwrap();
            let mut map = mem.map_writable().unwrap();
            assert_eq!(map.as_ptr() as usize & 1023, 0);
            map.copy_from_slice(&data);
        }
        let copy = mem.copy();
        assert_eq!(copy.size(), data.len());
        assert_eq!(copy.allocator().unwrap(), rust_allocator());
        {
            let map = copy.map_readable().unwrap();
            assert_eq!(map.as_slice(), &data);
        }
        let share = mem.share(2..4);
        assert_eq!(share.size(), 2);
        assert_eq!(share.allocator().unwrap(), rust_allocator());
        {
            let map = share.map_readable().unwrap();
            assert_eq!(map.as_slice(), &data[2..4]);
        }

        // Allocation with zeroed 32-byte prefix and padding.
        let mut mem = rust_allocator()
            .alloc(
                data.len(),
                Some(&crate::AllocationParams::new(
                    crate::MemoryFlags::ZERO_PADDED | crate::MemoryFlags::ZERO_PREFIXED,
                    1023,
                    32,
                    32,
                )),
            )
            .unwrap();
        assert_eq!(mem.size(), data.len());
        assert_eq!(mem.maxsize(), data.len() + 32 + 32);
        assert_eq!(mem.allocator().unwrap(), rust_allocator());
        {
            let mem = mem.get_mut().unwrap();
            let mut map = mem.map_writable().unwrap();
            // The visible data starts after the 32-byte prefix.
            assert_eq!((map.as_ptr() as usize - 32) & 1023, 0);
            map.copy_from_slice(&data);
        }
        let copy = mem.copy();
        assert_eq!(copy.size(), data.len());
        assert_eq!(copy.allocator().unwrap(), rust_allocator());
        {
            let map = copy.map_readable().unwrap();
            assert_eq!(map.as_slice(), &data);
        }
        let share = mem.share(2..4);
        assert_eq!(share.size(), 2);
        assert_eq!(share.allocator().unwrap(), rust_allocator());
        {
            let map = share.map_readable().unwrap();
            assert_eq!(map.as_slice(), &data[2..4]);
        }

        // Sharing the full maxsize exposes the zeroed prefix and padding.
        let share = mem.share_maxsize(0..(data.len() + 32 + 32));
        assert_eq!(share.size(), data.len() + 32 + 32);
        assert_eq!(share.allocator().unwrap(), rust_allocator());
        {
            let map = share.map_readable().unwrap();
            let padding = [0; 32];
            assert_eq!(&map.as_slice()[..32], &padding);
            assert_eq!(&map.as_slice()[32..][..data.len()], &data);
            assert_eq!(&map.as_slice()[(32 + data.len())..], &padding);
        }
    }

    #[test]
    fn test_wrap_vec_u8() {
        crate::init().unwrap();

        let data = vec![1u8, 2, 3, 4, 5];
        let expected = data.clone();
        let mem = Memory::from_slice(data);
        assert_eq!(mem.size(), 5);
        {
            let map = mem.map_readable().unwrap();
            assert_eq!(&expected, map.as_slice());
        }
    }

    #[test]
    fn test_wrap_array_u8() {
        crate::init().unwrap();

        let data: [u8; 5] = [1u8, 2, 3, 4, 5];
        let expected = data;
        let mem = Memory::from_slice(data);
        assert_eq!(mem.size(), 5);
        {
            let map = mem.map_readable().unwrap();
            assert_eq!(&expected, map.as_slice());
        }
    }

    #[test]
    fn test_wrap_vec_u8_and_back() {
        crate::init().unwrap();

        let data = vec![1u8, 2, 3, 4, 5];
        let expected = data.clone();
        let mem = Memory::from_slice(data);
        assert_eq!(mem.size(), 5);
        {
            let map = mem.map_readable().unwrap();
            assert_eq!(&expected, map.as_slice());
        }
        // Round-trip: the wrapped Vec can be recovered unchanged.
        let extracted: Vec<u8> = mem.try_into_inner().unwrap();
        assert_eq!(extracted, expected);
    }

    #[test]
    fn test_wrap_array_u8_and_back() {
        crate::init().unwrap();

        let data: [u8; 5] = [1u8, 2, 3, 4, 5];
        let expected = data;
        let mem = Memory::from_slice(data);
        assert_eq!(mem.size(), 5);
        {
            let map = mem.map_readable().unwrap();
            assert_eq!(&expected, map.as_slice());
        }
        // Round-trip: the wrapped array can be recovered unchanged.
        let extracted: [u8; 5] = mem.try_into_inner().unwrap();
        assert_eq!(extracted, expected);
    }

    #[test]
    fn test_wrap_array_u8_mem_ops() {
        crate::init().unwrap();

        let data = [0, 1, 2, 3, 4, 5, 6, 7];
        let memory = Memory::from_slice(data);
        assert_eq!(memory.size(), data.len());
        {
            let map = memory.map_readable().unwrap();
            assert_eq!(map.as_slice(), &data);
        }

        // A copy is an independent memory without a parent.
        let copy = memory.copy();
        assert!(copy.parent().is_none());
        {
            let map1 = memory.map_readable().unwrap();
            let map2 = copy.map_readable().unwrap();
            assert_eq!(map1.as_slice(), map2.as_slice());
        }

        // A full share references the original as its parent.
        let share = memory.share(..);
        assert_eq!(share.parent().unwrap().as_ptr(), memory.as_ptr());
        {
            let map1 = memory.map_readable().unwrap();
            let map2 = share.map_readable().unwrap();
            assert_eq!(map1.as_slice(), map2.as_slice());
        }

        let sub1 = memory.share(..2);
        assert_eq!(sub1.size(), 2);
        assert_eq!(sub1.parent().unwrap().as_ptr(), memory.as_ptr());
        {
            let map = sub1.map_readable().unwrap();
            assert_eq!(map.as_slice(), &data[..2]);
        }

        let sub2 = memory.share(2..);
        assert_eq!(sub2.size(), 6);
        assert_eq!(sub2.parent().unwrap().as_ptr(), memory.as_ptr());
        {
            let map = sub2.map_readable().unwrap();
            assert_eq!(map.as_slice(), &data[2..]);
        }

        // sub1 (0..2) and sub2 (2..) are contiguous within the parent.
        let offset = sub1.is_span(&sub2).unwrap();
        assert_eq!(offset, 0);

        // Shares of shares are still parented to the root memory.
        let sub3 = sub2.share(2..);
        assert_eq!(sub3.size(), 4);
        assert_eq!(sub3.parent().unwrap().as_ptr(), memory.as_ptr());
        {
            let map = sub3.map_readable().unwrap();
            assert_eq!(map.as_slice(), &data[4..]);
        }
    }
}