mod generated_tests {
#![allow(non_snake_case)]
#![allow(improper_ctypes)]
#![deny(improper_ctypes_definitions)]
#[allow(unused_imports)]
use std::ffi::{CStr, c_int, c_char, c_uint};
use std::fmt::{Debug, Write};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
#[allow(unused_imports)]
use std::{mem, ptr, slice};
#[allow(unused_imports)]
use std::mem::{MaybeUninit, offset_of};
use super::*;
// Set to `true` by any check that observes a Rust/C disagreement; read once in `main`.
pub static FAILED: AtomicBool = AtomicBool::new(false);
// Count of individual checks that passed; reported in `main` on success.
pub static NTESTS: AtomicUsize = AtomicUsize::new(0);
/// Compare a Rust-side value against the corresponding C-side value.
///
/// On a mismatch, print a diagnostic naming `attr` and latch the global
/// `FAILED` flag; on a match, bump the `NTESTS` pass counter.
fn check_same<T: PartialEq + Debug>(rust: T, c: T, attr: &str) {
    if rust == c {
        NTESTS.fetch_add(1, Ordering::Relaxed);
        return;
    }
    eprintln!("bad {attr}: rust: {rust:?} != c {c:?}");
    FAILED.store(true, Ordering::Relaxed);
}
/// Compare the raw byte representation produced on the Rust side against the
/// bytes produced on the C side.
///
/// On a match, bumps the `NTESTS` pass counter. On a mismatch, latches the
/// `FAILED` flag and prints a diagnostic: the first differing byte (when the
/// lengths agree) or the length difference, followed by a hex dump of both
/// buffers.
fn check_same_bytes(rust: &[u8], c: &[u8], attr: &str) {
if rust == c {
NTESTS.fetch_add(1, Ordering::Relaxed);
return;
}
FAILED.store(true, Ordering::Relaxed);
// Build the whole diagnostic in one String so it is emitted atomically.
let mut s = String::new();
if rust.len() == c.len() {
// Same length: report only the first mismatching byte to keep output short.
for (i, (&rb, &cb)) in rust.iter().zip(c.iter()).enumerate() {
if rb != cb {
writeln!(
s, "bad {attr} at byte {i}: rust: {rb:?} ({rb:#x}) != c {cb:?} ({cb:#x})"
).unwrap();
break;
}
}
} else {
writeln!(s, "bad {attr}: rust len {} != c len {}", rust.len(), c.len()).unwrap();
}
// Full hex dump of both buffers for manual comparison.
write!(s, "  rust bytes:").unwrap();
for b in rust {
write!(s, " {b:02x}").unwrap();
}
write!(s, "\n  c bytes:  ").unwrap();
for b in c {
write!(s, " {b:02x}").unwrap();
}
eprintln!("{s}");
}
/// Check that Rust and C agree on the size and alignment of `VecU8`.
pub fn ctest_size_align_VecU8() {
    extern "C" {
        fn ctest_size_of__VecU8() -> u64;
        fn ctest_align_of__VecU8() -> u64;
    }
    // SAFETY: the C shims take no arguments and merely return layout constants.
    let (c_size, c_align) = unsafe { (ctest_size_of__VecU8(), ctest_align_of__VecU8()) };
    check_same(size_of::<VecU8>() as u64, c_size, "`VecU8` size");
    check_same(align_of::<VecU8>() as u64, c_align, "`VecU8` align");
}
/// Check that Rust and C agree on the size and alignment of `VecU16`.
pub fn ctest_size_align_VecU16() {
    extern "C" {
        fn ctest_size_of__VecU16() -> u64;
        fn ctest_align_of__VecU16() -> u64;
    }
    // SAFETY: the C shims take no arguments and merely return layout constants.
    let (c_size, c_align) = unsafe { (ctest_size_of__VecU16(), ctest_align_of__VecU16()) };
    check_same(size_of::<VecU16>() as u64, c_size, "`VecU16` size");
    check_same(align_of::<VecU16>() as u64, c_align, "`VecU16` align");
}
/// Check that Rust and C agree on the offset and size of field `x` of `VecU8`.
pub fn ctest_field_size_offset_VecU8_x() {
    extern "C" {
        fn ctest_offset_of__VecU8__x() -> u64;
        fn ctest_size_of__VecU8__x() -> u64;
    }
    // A zeroed dummy value is sufficient: only the field's layout is examined.
    let dummy = MaybeUninit::<VecU8>::zeroed();
    // SAFETY: the pointer stays inside the zeroed allocation and the field is
    // read unaligned, so no alignment or validity requirement is violated.
    let field_val = unsafe { (&raw const (*dummy.as_ptr()).x).read_unaligned() };
    // SAFETY: the C shims take no arguments and return layout constants.
    let c_offset = unsafe { ctest_offset_of__VecU8__x() };
    check_same(offset_of!(VecU8, x) as u64, c_offset, "field offset `x` of `VecU8`");
    // SAFETY: same as above.
    let c_size = unsafe { ctest_size_of__VecU8__x() };
    check_same(size_of_val(&field_val) as u64, c_size, "field size `x` of `VecU8`");
}
/// Check that Rust and C agree on the offset and size of field `y` of `VecU8`.
pub fn ctest_field_size_offset_VecU8_y() {
    extern "C" {
        fn ctest_offset_of__VecU8__y() -> u64;
        fn ctest_size_of__VecU8__y() -> u64;
    }
    // A zeroed dummy value is sufficient: only the field's layout is examined.
    let dummy = MaybeUninit::<VecU8>::zeroed();
    // SAFETY: the pointer stays inside the zeroed allocation and the field is
    // read unaligned, so no alignment or validity requirement is violated.
    let field_val = unsafe { (&raw const (*dummy.as_ptr()).y).read_unaligned() };
    // SAFETY: the C shims take no arguments and return layout constants.
    let c_offset = unsafe { ctest_offset_of__VecU8__y() };
    check_same(offset_of!(VecU8, y) as u64, c_offset, "field offset `y` of `VecU8`");
    // SAFETY: same as above.
    let c_size = unsafe { ctest_size_of__VecU8__y() };
    check_same(size_of_val(&field_val) as u64, c_size, "field size `y` of `VecU8`");
}
/// Check that Rust and C agree on the offset and size of field `x` of `VecU16`.
pub fn ctest_field_size_offset_VecU16_x() {
    extern "C" {
        fn ctest_offset_of__VecU16__x() -> u64;
        fn ctest_size_of__VecU16__x() -> u64;
    }
    // A zeroed dummy value is sufficient: only the field's layout is examined.
    let dummy = MaybeUninit::<VecU16>::zeroed();
    // SAFETY: the pointer stays inside the zeroed allocation and the field is
    // read unaligned, so no alignment or validity requirement is violated.
    let field_val = unsafe { (&raw const (*dummy.as_ptr()).x).read_unaligned() };
    // SAFETY: the C shims take no arguments and return layout constants.
    let c_offset = unsafe { ctest_offset_of__VecU16__x() };
    check_same(offset_of!(VecU16, x) as u64, c_offset, "field offset `x` of `VecU16`");
    // SAFETY: same as above.
    let c_size = unsafe { ctest_size_of__VecU16__x() };
    check_same(size_of_val(&field_val) as u64, c_size, "field size `x` of `VecU16`");
}
/// Check that Rust and C agree on the offset and size of field `y` of `VecU16`.
pub fn ctest_field_size_offset_VecU16_y() {
    extern "C" {
        fn ctest_offset_of__VecU16__y() -> u64;
        fn ctest_size_of__VecU16__y() -> u64;
    }
    // A zeroed dummy value is sufficient: only the field's layout is examined.
    let dummy = MaybeUninit::<VecU16>::zeroed();
    // SAFETY: the pointer stays inside the zeroed allocation and the field is
    // read unaligned, so no alignment or validity requirement is violated.
    let field_val = unsafe { (&raw const (*dummy.as_ptr()).y).read_unaligned() };
    // SAFETY: the C shims take no arguments and return layout constants.
    let c_offset = unsafe { ctest_offset_of__VecU16__y() };
    check_same(offset_of!(VecU16, y) as u64, c_offset, "field offset `y` of `VecU16`");
    // SAFETY: same as above.
    let c_size = unsafe { ctest_size_of__VecU16__y() };
    check_same(size_of_val(&field_val) as u64, c_size, "field size `y` of `VecU16`");
}
/// Check that Rust and C compute the same address for field `x` of `VecU8`.
pub fn ctest_field_ptr_VecU8_x() {
    extern "C" {
        fn ctest_field_ptr__VecU8__x(a: *const VecU8) -> *mut u8;
    }
    let dummy = MaybeUninit::<VecU8>::zeroed();
    let base = dummy.as_ptr();
    // SAFETY: `&raw const` only computes an address; nothing is dereferenced.
    let rust_ptr = unsafe { &raw const (*base).x };
    // SAFETY: the C shim only offsets the pointer it is handed.
    let c_ptr = unsafe { ctest_field_ptr__VecU8__x(base) };
    check_same(rust_ptr.cast(), c_ptr, "field pointer access `x` of `VecU8`");
}
/// Check that Rust and C compute the same address for field `y` of `VecU8`.
pub fn ctest_field_ptr_VecU8_y() {
    extern "C" {
        fn ctest_field_ptr__VecU8__y(a: *const VecU8) -> *mut u8;
    }
    let dummy = MaybeUninit::<VecU8>::zeroed();
    let base = dummy.as_ptr();
    // SAFETY: `&raw const` only computes an address; nothing is dereferenced.
    let rust_ptr = unsafe { &raw const (*base).y };
    // SAFETY: the C shim only offsets the pointer it is handed.
    let c_ptr = unsafe { ctest_field_ptr__VecU8__y(base) };
    check_same(rust_ptr.cast(), c_ptr, "field pointer access `y` of `VecU8`");
}
/// Check that Rust and C compute the same address for field `x` of `VecU16`.
pub fn ctest_field_ptr_VecU16_x() {
    extern "C" {
        fn ctest_field_ptr__VecU16__x(a: *const VecU16) -> *mut u8;
    }
    let dummy = MaybeUninit::<VecU16>::zeroed();
    let base = dummy.as_ptr();
    // SAFETY: `&raw const` only computes an address; nothing is dereferenced.
    let rust_ptr = unsafe { &raw const (*base).x };
    // SAFETY: the C shim only offsets the pointer it is handed.
    let c_ptr = unsafe { ctest_field_ptr__VecU16__x(base) };
    check_same(rust_ptr.cast(), c_ptr, "field pointer access `x` of `VecU16`");
}
/// Check that Rust and C compute the same address for field `y` of `VecU16`.
pub fn ctest_field_ptr_VecU16_y() {
    extern "C" {
        fn ctest_field_ptr__VecU16__y(a: *const VecU16) -> *mut u8;
    }
    let dummy = MaybeUninit::<VecU16>::zeroed();
    let base = dummy.as_ptr();
    // SAFETY: `&raw const` only computes an address; nothing is dereferenced.
    let rust_ptr = unsafe { &raw const (*base).y };
    // SAFETY: the C shim only offsets the pointer it is handed.
    let c_ptr = unsafe { ctest_field_ptr__VecU16__y(base) };
    check_same(rust_ptr.cast(), c_ptr, "field pointer access `y` of `VecU16`");
}
/// Compute, for each byte of `VecU8`, whether it is padding (`true`) or part
/// of one of the two fields (`false`).
///
/// Defect fixed: the generated guard `if 2 == 0 { return vec![!false; ...] }`
/// (the zero-field case, with `2` hard-coded as the field count) was a
/// constant-false condition, i.e. unreachable dead code; it and the
/// obfuscated `!false` literal are removed. Behavior is unchanged.
fn roundtrip_padding__VecU8() -> Vec<bool> {
    // (offset, size) for every field of `VecU8`.
    let mut fields = Vec::<(usize, usize)>::new();
    let bar = MaybeUninit::<VecU8>::zeroed();
    let bar = bar.as_ptr();
    // SAFETY: the pointers stay inside the zeroed allocation and the fields
    // are read unaligned, so no alignment or validity requirement is violated.
    let val = unsafe { (&raw const (*bar).x).read_unaligned() };
    fields.push((offset_of!(VecU8, x), size_of_val(&val)));
    // SAFETY: same as above.
    let val = unsafe { (&raw const (*bar).y).read_unaligned() };
    fields.push((offset_of!(VecU8, y), size_of_val(&val)));
    // Start from "every byte is padding" and carve out the field bytes.
    let mut is_padding_byte = vec![true; size_of::<VecU8>()];
    for (off, size) in &fields {
        for i in 0..*size {
            is_padding_byte[off + i] = false;
        }
    }
    is_padding_byte
}
/// Round-trip a `VecU8` value through C by value and verify that every
/// non-padding byte survives in both directions.
///
/// Rust fills `input` with a known byte pattern and passes it to the C shim;
/// the shim reports the bytes it received via `value_bytes` (checked against
/// the Rust-side pattern) and returns a value that is checked against the
/// complementary pattern in `expected`. Padding bytes are skipped because
/// their content is unspecified.
pub fn ctest_roundtrip_VecU8() {
type U = VecU8;
extern "C" {
fn ctest_size_of__VecU8() -> u64;
fn ctest_roundtrip__VecU8(
input: MaybeUninit<U>, is_padding_byte: *const bool, value_bytes: *mut u8
) -> U;
}
const SIZE: usize = size_of::<U>();
let is_padding_byte = roundtrip_padding__VecU8();
let mut expected = vec![0u8; SIZE];
let mut input = MaybeUninit::<U>::zeroed();
let input_ptr = input.as_mut_ptr().cast::<u8>();
// Fill `input` with the pattern (i % 256) and `expected` with its complement
// (255 - i % 256); any byte that would be 0 is replaced by 42 so a dropped
// byte cannot be mistaken for zeroed memory.
for i in 0..SIZE {
let c: u8 = (i % 256) as u8;
let c = if c == 0 { 42 } else { c };
let d: u8 = 255_u8 - (i % 256) as u8;
let d = if d == 0 { 42 } else { d };
unsafe {
// Volatile write so the pattern store cannot be optimized away.
input_ptr.add(i).write_volatile(c);
expected[i] = d;
}
}
// If the two sides disagree on the size, the by-value round-trip below would
// read or write out of bounds, so report and bail out early.
let c_size = unsafe { ctest_size_of__VecU8() } as usize;
if SIZE != c_size {
FAILED.store(true, Ordering::Relaxed);
eprintln!(
"size of `struct VecU8` is {c_size} in C and {SIZE} in Rust\n",
);
return;
}
// Buffer the C shim fills with the bytes it observed in `input`.
let mut c_value_bytes = vec![0; size_of::<VecU8>()];
// SAFETY: sizes were just verified to match; the shim only reads `input`,
// reads `is_padding_byte`, and writes SIZE bytes through `value_bytes`.
let r: U = unsafe {
ctest_roundtrip__VecU8(input, is_padding_byte.as_ptr(), c_value_bytes.as_mut_ptr())
};
// Direction 1: Rust -> C. Every non-padding byte C observed must equal the
// byte Rust wrote.
for (i, is_padding_byte) in is_padding_byte.iter().enumerate() {
if *is_padding_byte { continue; }
let rust = unsafe { *input_ptr.add(i) };
let c = c_value_bytes[i];
if rust != c {
eprintln!("rust[{}] = {} != {} (C): Rust `VecU8` -> C", i, rust, c);
FAILED.store(true, Ordering::Relaxed);
}
}
// Direction 2: C -> Rust. Every non-padding byte of the returned value must
// match the complementary pattern the shim is expected to produce.
for (i, is_padding_byte) in is_padding_byte.iter().enumerate() {
if *is_padding_byte { continue; }
let rust = expected[i] as usize;
// Volatile read so the comparison cannot be optimized away.
let c = unsafe { (&raw const r).cast::<u8>().add(i).read_volatile() as usize };
if rust != c {
eprintln!(
"rust [{i}] = {rust} != {c} (C): C `VecU8` -> Rust",
);
FAILED.store(true, Ordering::Relaxed);
}
}
}
/// Compute, for each byte of `VecU16`, whether it is padding (`true`) or part
/// of one of the two fields (`false`).
///
/// Defect fixed: the generated guard `if 2 == 0 { return vec![!false; ...] }`
/// (the zero-field case, with `2` hard-coded as the field count) was a
/// constant-false condition, i.e. unreachable dead code; it and the
/// obfuscated `!false` literal are removed. Behavior is unchanged.
fn roundtrip_padding__VecU16() -> Vec<bool> {
    // (offset, size) for every field of `VecU16`.
    let mut fields = Vec::<(usize, usize)>::new();
    let bar = MaybeUninit::<VecU16>::zeroed();
    let bar = bar.as_ptr();
    // SAFETY: the pointers stay inside the zeroed allocation and the fields
    // are read unaligned, so no alignment or validity requirement is violated.
    let val = unsafe { (&raw const (*bar).x).read_unaligned() };
    fields.push((offset_of!(VecU16, x), size_of_val(&val)));
    // SAFETY: same as above.
    let val = unsafe { (&raw const (*bar).y).read_unaligned() };
    fields.push((offset_of!(VecU16, y), size_of_val(&val)));
    // Start from "every byte is padding" and carve out the field bytes.
    let mut is_padding_byte = vec![true; size_of::<VecU16>()];
    for (off, size) in &fields {
        for i in 0..*size {
            is_padding_byte[off + i] = false;
        }
    }
    is_padding_byte
}
/// Round-trip a `VecU16` value through C by value and verify that every
/// non-padding byte survives in both directions.
///
/// Rust fills `input` with a known byte pattern and passes it to the C shim;
/// the shim reports the bytes it received via `value_bytes` (checked against
/// the Rust-side pattern) and returns a value that is checked against the
/// complementary pattern in `expected`. Padding bytes are skipped because
/// their content is unspecified.
pub fn ctest_roundtrip_VecU16() {
type U = VecU16;
extern "C" {
fn ctest_size_of__VecU16() -> u64;
fn ctest_roundtrip__VecU16(
input: MaybeUninit<U>, is_padding_byte: *const bool, value_bytes: *mut u8
) -> U;
}
const SIZE: usize = size_of::<U>();
let is_padding_byte = roundtrip_padding__VecU16();
let mut expected = vec![0u8; SIZE];
let mut input = MaybeUninit::<U>::zeroed();
let input_ptr = input.as_mut_ptr().cast::<u8>();
// Fill `input` with the pattern (i % 256) and `expected` with its complement
// (255 - i % 256); any byte that would be 0 is replaced by 42 so a dropped
// byte cannot be mistaken for zeroed memory.
for i in 0..SIZE {
let c: u8 = (i % 256) as u8;
let c = if c == 0 { 42 } else { c };
let d: u8 = 255_u8 - (i % 256) as u8;
let d = if d == 0 { 42 } else { d };
unsafe {
// Volatile write so the pattern store cannot be optimized away.
input_ptr.add(i).write_volatile(c);
expected[i] = d;
}
}
// If the two sides disagree on the size, the by-value round-trip below would
// read or write out of bounds, so report and bail out early.
let c_size = unsafe { ctest_size_of__VecU16() } as usize;
if SIZE != c_size {
FAILED.store(true, Ordering::Relaxed);
eprintln!(
"size of `struct VecU16` is {c_size} in C and {SIZE} in Rust\n",
);
return;
}
// Buffer the C shim fills with the bytes it observed in `input`.
let mut c_value_bytes = vec![0; size_of::<VecU16>()];
// SAFETY: sizes were just verified to match; the shim only reads `input`,
// reads `is_padding_byte`, and writes SIZE bytes through `value_bytes`.
let r: U = unsafe {
ctest_roundtrip__VecU16(input, is_padding_byte.as_ptr(), c_value_bytes.as_mut_ptr())
};
// Direction 1: Rust -> C. Every non-padding byte C observed must equal the
// byte Rust wrote.
for (i, is_padding_byte) in is_padding_byte.iter().enumerate() {
if *is_padding_byte { continue; }
let rust = unsafe { *input_ptr.add(i) };
let c = c_value_bytes[i];
if rust != c {
eprintln!("rust[{}] = {} != {} (C): Rust `VecU16` -> C", i, rust, c);
FAILED.store(true, Ordering::Relaxed);
}
}
// Direction 2: C -> Rust. Every non-padding byte of the returned value must
// match the complementary pattern the shim is expected to produce.
for (i, is_padding_byte) in is_padding_byte.iter().enumerate() {
if *is_padding_byte { continue; }
let rust = expected[i] as usize;
// Volatile read so the comparison cannot be optimized away.
let c = unsafe { (&raw const r).cast::<u8>().add(i).read_volatile() as usize };
if rust != c {
eprintln!(
"rust [{i}] = {rust} != {c} (C): C `VecU16` -> Rust",
);
FAILED.store(true, Ordering::Relaxed);
}
}
}
}
use generated_tests::*;
/// Run every generated layout test, then panic if any check failed,
/// otherwise report the number of passing checks.
fn main() {
    println!("RUNNING ALL TESTS");
    run_all();
    if !FAILED.load(std::sync::atomic::Ordering::Relaxed) {
        let passed = NTESTS.load(std::sync::atomic::Ordering::Relaxed);
        println!("PASSED {} tests", passed);
    } else {
        panic!("some tests failed");
    }
}
fn run_all() {
ctest_size_align_VecU8();
ctest_size_align_VecU16();
ctest_field_size_offset_VecU8_x();
ctest_field_size_offset_VecU8_y();
ctest_field_size_offset_VecU16_x();
ctest_field_size_offset_VecU16_y();
ctest_field_ptr_VecU8_x();
ctest_field_ptr_VecU8_y();
ctest_field_ptr_VecU16_x();
ctest_field_ptr_VecU16_y();
ctest_roundtrip_VecU8();
ctest_roundtrip_VecU16();
}