1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
#![no_std]
#![warn(missing_docs)]
#![allow(clippy::size_of_in_element_count)]
#[cfg(feature = "glam")]
use glam::{
DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat4, Quat,
UVec2, UVec3, UVec4, Vec2, Vec3, Vec4,
};
#[cfg(feature = "half")]
use half::{bf16, f16};
/// Types whose in-memory representation can be viewed as a raw byte slice.
///
/// The bytes are the value's native in-memory representation, so the result
/// depends on the target's endianness and is not a portable serialization
/// format.
pub trait AsBytes {
    /// Returns a read-only view of `self`'s memory as bytes.
    fn as_bytes(&self) -> &[u8];
}
/// Types whose in-memory representation can be viewed and modified as a raw
/// byte slice.
///
/// Writing through the returned slice rewrites the value's native in-memory
/// representation directly, so the byte layout is target-endianness dependent.
pub trait AsBytesMut {
    /// Returns a mutable view of `self`'s memory as bytes.
    fn as_bytes_mut(&mut self) -> &mut [u8];
}
/// Implements `AsBytes` and `AsBytesMut` for each listed type, and for
/// fixed-size arrays of it, by reinterpreting the value's memory as `u8`s.
///
/// Soundness of the mutable impls relies on two properties of each
/// instantiated type: it has no padding bytes, and every bit pattern is a
/// valid value (so writing arbitrary bytes cannot create an invalid value).
/// This holds for the primitive integer/float types instantiated below;
/// presumably it also holds for the `half` and `glam` types (plain float/int
/// aggregates) — confirm before adding new types to the invocation lists.
macro_rules! impl_types {
    ($($ty:ty)+) => {
        $(
            impl AsBytes for $ty {
                fn as_bytes(&self) -> &[u8] {
                    // SAFETY: a `[u8; size_of::<$ty>()]` covers exactly the
                    // value's memory; `u8` has alignment 1 and no validity
                    // requirements, and $ty is assumed padding-free (see
                    // macro docs). The borrow of `self` keeps the memory live.
                    unsafe { &*(self as *const $ty as *const [u8; core::mem::size_of::<$ty>()]) }
                }
            }
            impl AsBytesMut for $ty {
                fn as_bytes_mut(&mut self) -> &mut [u8] {
                    // SAFETY: same layout argument as `as_bytes`; mutation is
                    // sound only because every bit pattern is assumed valid
                    // for $ty. `&mut self` guarantees exclusive access.
                    unsafe { &mut*(self as *mut $ty as *mut [u8; core::mem::size_of::<$ty>()]) }
                }
            }
            impl<const N: usize> AsBytes for [$ty; N] {
                fn as_bytes(&self) -> &[u8] {
                    // SAFETY: the N array elements are contiguous, so the
                    // element pointer cast to `*const u8` is valid for
                    // len * size_of::<$ty>() bytes, all initialized.
                    unsafe {
                        core::slice::from_raw_parts(
                            self.as_ptr().cast(),
                            self.len() * core::mem::size_of::<$ty>(),
                        )
                    }
                }
            }
            impl<const N: usize> AsBytesMut for [$ty; N] {
                fn as_bytes_mut(&mut self) -> &mut [u8] {
                    // SAFETY: same layout argument as the shared view;
                    // `&mut self` rules out aliasing for the mutable slice.
                    unsafe {
                        core::slice::from_raw_parts_mut(
                            self.as_mut_ptr().cast(),
                            self.len() * core::mem::size_of::<$ty>(),
                        )
                    }
                }
            }
        )*
    }
}
impl AsBytes for u8 {
    /// A single byte already is its own representation: expose it as a
    /// one-element slice without any `unsafe`.
    fn as_bytes(&self) -> &[u8] {
        // `&[u8; 1]` unsize-coerces to `&[u8]` at the return site.
        core::array::from_ref(self)
    }
}
impl AsBytesMut for u8 {
    /// Mutable counterpart of the `u8` byte view: a one-element mutable
    /// slice over the byte itself, no `unsafe` needed.
    fn as_bytes_mut(&mut self) -> &mut [u8] {
        // `&mut [u8; 1]` unsize-coerces to `&mut [u8]` at the return site.
        core::array::from_mut(self)
    }
}
impl<const N: usize> AsBytes for [u8; N] {
    /// A byte array is already a run of bytes; slicing the full range
    /// yields the view directly, no `unsafe` needed.
    fn as_bytes(&self) -> &[u8] {
        &self[..]
    }
}
impl<const N: usize> AsBytesMut for [u8; N] {
    /// Mutable counterpart of the byte-array view: a full-range mutable
    /// slice over the array, no `unsafe` needed.
    fn as_bytes_mut(&mut self) -> &mut [u8] {
        &mut self[..]
    }
}
// Primitive integers and floats: no padding, every bit pattern valid.
// (`u8` has bespoke unsafe-free impls above, so it is not listed here.)
impl_types!(u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize f32 f64);
// Half-precision floats: 2-byte plain values, any bit pattern valid.
#[cfg(feature = "half")]
impl_types!(f16 bf16);
// glam vector/matrix/quaternion types — assumed padding-free float/int
// aggregates; NOTE(review): confirm layout for SIMD-backed builds of glam.
#[cfg(feature = "glam")]
impl_types!(DMat2 DMat3 DMat4 DQuat DVec2 DVec3 DVec4 IVec2 IVec3 IVec4 Mat2 Mat3 Mat4 Quat UVec2 UVec3 UVec4 Vec2 Vec3 Vec4);