use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::sync::Arc;
use cranelift::prelude::*;
use cranelift_jit::{JITBuilder, JITModule};
use cranelift_module::{FuncId, Linkage, Module};
use facet_core::{Def, Facet, Field, Shape, Type as FacetType, UserType};
use super::format::make_c_sig;
use super::helpers::{self, JitContext, ParserVTable};
use super::jit_debug;
use crate::{DeserializeError, FormatParser};
/// Owns a finalized JIT module (plus any nested modules compiled for
/// nested struct shapes) and the entry-point pointer into its code.
///
/// The module fields are never read after construction, but they must be
/// kept alive: dropping a `JITModule` frees the executable memory that
/// `fn_ptr` points into.
pub struct CachedModule {
    // Kept alive only so the code behind `fn_ptr` is not freed.
    #[allow(dead_code)]
    module: JITModule,
    // Modules for nested struct deserializers reachable from `fn_ptr`.
    #[allow(dead_code)]
    nested_modules: Vec<JITModule>,
    // Entry point of the compiled deserializer inside `module`.
    fn_ptr: *const u8,
}
impl CachedModule {
pub fn new(module: JITModule, nested_modules: Vec<JITModule>, fn_ptr: *const u8) -> Self {
Self {
module,
nested_modules,
fn_ptr,
}
}
pub fn fn_ptr(&self) -> *const u8 {
self.fn_ptr
}
}
// SAFETY: NOTE(review) — `fn_ptr` points into executable memory owned by
// `module`, which travels with the struct, and nothing here is mutated after
// construction. Confirm `JITModule` itself carries no thread-affine state
// before relying on cross-thread sharing.
unsafe impl Send for CachedModule {}
unsafe impl Sync for CachedModule {}
/// Typed handle to a compiled deserializer for output type `T` driven by
/// parser type `P`.
///
/// Holds the entry-point pointer, the vtable of parser callbacks the
/// compiled code invokes, and an `Arc` to the cached module so the
/// executable memory outlives this handle.
pub struct CompiledDeserializer<T, P> {
    // Entry point; same value as `_cached.fn_ptr()`, snapshotted for speed.
    fn_ptr: *const u8,
    // Parser callback table passed to the JIT code via `JitContext`.
    vtable: ParserVTable,
    // Keeps the backing executable memory alive.
    _cached: Arc<CachedModule>,
    // Ties `T`/`P` to the handle without storing either; `fn(&mut P) -> T`
    // keeps the type invariant in `P` and covariant-safe in `T`.
    _phantom: PhantomData<fn(&mut P) -> T>,
}
// SAFETY: NOTE(review) — the handle only stores a code pointer, a vtable of
// function pointers, and an `Arc<CachedModule>`; no `T` or `P` value is held
// (only `PhantomData`). Confirm `ParserVTable` contains no interior
// mutability before relying on `Sync`.
unsafe impl<T, P> Send for CompiledDeserializer<T, P> {}
unsafe impl<T, P> Sync for CompiledDeserializer<T, P> {}
impl<T, P> CompiledDeserializer<T, P> {
    /// Builds a typed handle from a shared cached module and the parser
    /// vtable, snapshotting the entry-point pointer up front.
    pub fn from_cached(cached: Arc<CachedModule>, vtable: ParserVTable) -> Self {
        Self {
            fn_ptr: cached.fn_ptr(),
            vtable,
            _cached: cached,
            _phantom: PhantomData,
        }
    }

    /// The compiled function's entry-point pointer.
    pub fn as_ptr(&self) -> *const u8 {
        self.fn_ptr
    }

    /// The parser callback table the compiled code will use.
    pub fn vtable(&self) -> &ParserVTable {
        &self.vtable
    }
}
impl<'de, T: Facet<'de>, P: FormatParser<'de>> CompiledDeserializer<T, P> {
    /// Runs the compiled deserializer against `parser`, producing a `T`.
    ///
    /// On a non-zero return code from the JIT function, any struct fields
    /// recorded in `ctx.fields_seen` are dropped in place before the error
    /// is returned, so partially initialized output does not leak.
    pub fn deserialize(&self, parser: &mut P) -> Result<T, DeserializeError<P::Error>> {
        let mut output: MaybeUninit<T> = MaybeUninit::uninit();
        let mut ctx = JitContext {
            parser: parser as *mut P as *mut (),
            vtable: &self.vtable,
            peeked_event: None,
            fields_seen: 0,
        };
        if super::jit_debug_enabled() {
            jit_debug!("About to call compiled function at {:p}", self.fn_ptr);
            jit_debug!(" ctx: {:p}", &mut ctx);
            jit_debug!(" out: {:p}", output.as_mut_ptr());
            // SAFETY: `fn_ptr` points at a finalized JIT function body that is
            // at least 16 bytes long; this only reads code bytes for logging.
            let code_bytes = unsafe { std::slice::from_raw_parts(self.fn_ptr, 16) };
            let bytes_str: String = code_bytes.iter().map(|b| format!("{:02x} ", b)).collect();
            jit_debug!(" First 16 bytes of function: {}", bytes_str);
        }
        // ABI of the generated code: (ctx, out) -> 0 on success, error code otherwise.
        type CompiledFn<T> = unsafe extern "C" fn(*mut JitContext, *mut T) -> i32;
        // SAFETY: the module was compiled for exactly this signature and for
        // `T::SHAPE`; `_cached` keeps the backing executable memory alive.
        let func: CompiledFn<T> = unsafe { std::mem::transmute(self.fn_ptr) };
        let result = unsafe { func(&mut ctx, output.as_mut_ptr()) };
        // Fix: gate this trace like every other JIT trace above. Previously it
        // printed on every call in debug builds even with JIT debugging off.
        #[cfg(debug_assertions)]
        if super::jit_debug_enabled() {
            eprintln!("[JIT] Compiled function returned: {}", result);
        }
        if result == 0 {
            // SAFETY: a zero return code means the JIT function fully
            // initialized `output`.
            Ok(unsafe { output.assume_init() })
        } else {
            // Drop whatever fields the compiled code wrote before failing.
            let fields_seen = ctx.fields_seen;
            if fields_seen != 0 {
                unsafe {
                    cleanup_partial_struct::<T>(output.as_mut_ptr() as *mut u8, fields_seen);
                }
            }
            if result == helpers::ERR_MISSING_REQUIRED_FIELD {
                // The generated code does not report which field was missing.
                Err(DeserializeError::MissingField {
                    field: "unknown",
                    type_name: T::SHAPE.type_identifier,
                    span: None,
                    path: None,
                })
            } else {
                Err(DeserializeError::Unsupported(format!(
                    "JIT deserialization failed with code {}",
                    result
                )))
            }
        }
    }
}
/// Drops, in place, each struct field of `T` whose bit is set in
/// `fields_seen`. Non-struct shapes are a no-op.
///
/// Bit `idx` of `fields_seen` is interpreted as `struct_def.fields[idx]`
/// (declaration order). NOTE(review): the codegen path assigns bit indices
/// only to *required* (non-Option) fields, so for structs mixing Option and
/// required fields the two indexings may diverge — confirm they agree.
///
/// # Safety
/// `ptr` must point to storage with `T`'s layout, and every field whose bit
/// is set must be fully initialized and not yet dropped.
unsafe fn cleanup_partial_struct<'a, T: Facet<'a>>(ptr: *mut u8, fields_seen: u64) {
    use facet_core::PtrMut;
    let shape = T::SHAPE;
    // Only structs carry per-field drop bookkeeping.
    let FacetType::User(UserType::Struct(struct_def)) = &shape.ty else {
        return;
    };
    for (idx, field) in struct_def.fields.iter().enumerate() {
        // Skip fields the compiled code never marked as written.
        if (fields_seen & (1u64 << idx)) == 0 {
            continue;
        }
        let field_shape = field.shape();
        unsafe {
            // SAFETY: caller guarantees this field was initialized; offset
            // comes from the field's own reflection data.
            let field_ptr = ptr.add(field.offset);
            let _ = field_shape.call_drop_in_place(PtrMut::new(field_ptr));
        }
    }
}
/// Returns `true` when `shape` can be handled by the tier-2 JIT: either a
/// list of supported elements, or a struct with no flattened fields whose
/// every field type has a known write strategy.
pub fn is_jit_compatible(shape: &'static Shape) -> bool {
    // Top-level lists go through the dedicated list compilation path.
    if let Def::List(list_def) = &shape.def {
        return is_vec_element_supported(list_def.t);
    }
    // Otherwise only plain structs qualify.
    let struct_def = match &shape.ty {
        FacetType::User(UserType::Struct(s)) => s,
        _ => return false,
    };
    // Flattened fields would require key-merging logic the JIT lacks.
    let has_flattened = struct_def.fields.iter().any(|f| f.is_flattened());
    !has_flattened && struct_def.fields.iter().all(is_field_type_supported)
}
/// A field is supported exactly when a `WriteKind` exists for its shape.
fn is_field_type_supported(field: &Field) -> bool {
    let field_shape = field.shape();
    WriteKind::from_shape(field_shape).is_some()
}
/// Whether a list's element shape can be deserialized by the JIT:
/// a supported scalar, a nested list of supported elements, or a
/// JIT-compatible struct.
fn is_vec_element_supported(elem_shape: &'static Shape) -> bool {
    use facet_core::ScalarType;
    // Scalars: exactly the set with dedicated push helpers.
    match elem_shape.scalar_type() {
        Some(
            ScalarType::Bool
            | ScalarType::U8
            | ScalarType::U16
            | ScalarType::U32
            | ScalarType::U64
            | ScalarType::I8
            | ScalarType::I16
            | ScalarType::I32
            | ScalarType::I64
            | ScalarType::F32
            | ScalarType::F64
            | ScalarType::String,
        ) => return true,
        Some(_) => return false,
        None => {}
    }
    // Nested lists recurse on their element type.
    if let Def::List(inner_list) = &elem_shape.def {
        return is_vec_element_supported(inner_list.t);
    }
    // Struct elements are fine when they are themselves JIT-compatible.
    match &elem_shape.ty {
        FacetType::User(UserType::Struct(_)) => is_jit_compatible(elem_shape),
        _ => false,
    }
}
/// Output of a successful compilation: the finalized module, any modules
/// compiled for nested struct shapes, and the entry-point pointer.
/// Typically wrapped into a `CachedModule` by the caller.
pub struct CompileResult {
    // Finalized module containing the entry function.
    pub module: JITModule,
    // Modules backing nested-struct deserializers called from `fn_ptr`.
    pub nested_modules: Vec<JITModule>,
    // Entry point of the compiled deserializer.
    pub fn_ptr: *const u8,
}
/// Attempts to JIT-compile a deserializer for `T`'s shape.
///
/// Returns `None` when the shape is not JIT-compatible or any Cranelift
/// step fails; errors are deliberately swallowed so callers can fall back
/// to the interpreted path.
pub fn try_compile_module<'de, T: Facet<'de>>() -> Option<CompileResult> {
    let shape = T::SHAPE;
    if !is_jit_compatible(shape) {
        return None;
    }
    // Fresh module with all runtime helper symbols registered.
    let mut builder = JITBuilder::new(cranelift_module::default_libcall_names()).ok()?;
    register_helpers(&mut builder);
    let mut module = JITModule::new(builder);
    // Lists use the dedicated list path (never produces nested modules);
    // structs may recursively compile modules for nested struct fields.
    let (func_id, nested_modules) = match &shape.def {
        Def::List(_) => (compile_list_deserializer(&mut module, shape)?, Vec::new()),
        _ => compile_deserializer(&mut module, shape)?,
    };
    module.finalize_definitions().ok()?;
    let fn_ptr = module.get_finalized_function(func_id);
    Some(CompileResult {
        module,
        nested_modules,
        fn_ptr,
    })
}
/// Registers every `jit_*` runtime helper with the JIT symbol table so
/// generated code can call back into Rust via `Linkage::Import` functions.
fn register_helpers(builder: &mut JITBuilder) {
    // Name/address table; names must match the `declare_function` imports.
    let symbols: &[(&str, *const u8)] = &[
        ("jit_write_u8", helpers::jit_write_u8 as *const u8),
        ("jit_write_u16", helpers::jit_write_u16 as *const u8),
        ("jit_write_u32", helpers::jit_write_u32 as *const u8),
        ("jit_write_u64", helpers::jit_write_u64 as *const u8),
        ("jit_write_i8", helpers::jit_write_i8 as *const u8),
        ("jit_write_i16", helpers::jit_write_i16 as *const u8),
        ("jit_write_i32", helpers::jit_write_i32 as *const u8),
        ("jit_write_i64", helpers::jit_write_i64 as *const u8),
        ("jit_write_f32", helpers::jit_write_f32 as *const u8),
        ("jit_write_f64", helpers::jit_write_f64 as *const u8),
        ("jit_write_bool", helpers::jit_write_bool as *const u8),
        ("jit_write_string", helpers::jit_write_string as *const u8),
        ("jit_memcpy", helpers::jit_memcpy as *const u8),
        ("jit_write_error_string", helpers::jit_write_error_string as *const u8),
        ("jit_field_matches", helpers::jit_field_matches as *const u8),
        ("jit_deserialize_nested", helpers::jit_deserialize_nested as *const u8),
        ("jit_peek_event", helpers::jit_peek_event as *const u8),
        ("jit_next_event", helpers::jit_next_event as *const u8),
        ("jit_option_init_none", helpers::jit_option_init_none as *const u8),
        (
            "jit_option_init_some_from_value",
            helpers::jit_option_init_some_from_value as *const u8,
        ),
        (
            "jit_vec_init_with_capacity",
            helpers::jit_vec_init_with_capacity as *const u8,
        ),
        ("jit_vec_push", helpers::jit_vec_push as *const u8),
        ("jit_deserialize_vec", helpers::jit_deserialize_vec as *const u8),
        (
            "jit_deserialize_list_by_shape",
            helpers::jit_deserialize_list_by_shape as *const u8,
        ),
        ("jit_vec_push_bool", helpers::jit_vec_push_bool as *const u8),
        ("jit_vec_push_i64", helpers::jit_vec_push_i64 as *const u8),
        ("jit_vec_push_u64", helpers::jit_vec_push_u64 as *const u8),
        ("jit_vec_push_f64", helpers::jit_vec_push_f64 as *const u8),
        ("jit_vec_push_string", helpers::jit_vec_push_string as *const u8),
    ];
    for &(name, addr) in symbols {
        builder.symbol(name, addr);
    }
}
/// Widened element category for scalar `Vec` deserialization: each variant
/// maps to a dedicated `jit_vec_push_*` helper (narrow integer types are
/// widened to 64-bit, `f32` to `f64`).
#[derive(Debug, Clone, Copy)]
enum ListElementKind {
    Bool,
    I64,
    U64,
    F64,
    String,
}
impl ListElementKind {
    /// Maps a scalar shape to the widened element kind used by the
    /// specialized push helpers; `None` for non-scalars or unsupported
    /// scalar types.
    fn from_shape(shape: &Shape) -> Option<Self> {
        use facet_core::ScalarType as S;
        Some(match shape.scalar_type()? {
            S::Bool => Self::Bool,
            S::I8 | S::I16 | S::I32 | S::I64 => Self::I64,
            S::U8 | S::U16 | S::U32 | S::U64 => Self::U64,
            S::F32 | S::F64 => Self::F64,
            S::String => Self::String,
            _ => return None,
        })
    }
}
/// Compiles a standalone JIT function deserializing a top-level list of
/// scalars into the out-pointer (`(ctx, out) -> i32`, 0 on success).
///
/// Generated control flow:
///   entry -> check_array_start -> init_vec -> loop_peek -> check_end
///   check_end -> success (on ArrayEnd) or push_elem -> loop_peek
/// Any helper failure or unexpected event tag branches to `error`,
/// which returns -10.
fn compile_list_deserializer(module: &mut JITModule, shape: &'static Shape) -> Option<FuncId> {
    let Def::List(list_def) = &shape.def else {
        return None;
    };
    let elem_shape = list_def.t;
    // Only scalar elements with dedicated jit_vec_push_* helpers are handled.
    let elem_kind = ListElementKind::from_shape(elem_shape)?;
    // Facet vtable entries forwarded (as constants) to the vec helpers at runtime.
    let init_fn = list_def.init_in_place_with_capacity()?;
    let push_fn = list_def.push()?;
    let pointer_type = module.target_config().pointer_type();
    // Signature of the generated function: (ctx, out) -> i32 status.
    let sig = {
        let mut s = make_c_sig(module);
        s.params.push(AbiParam::new(pointer_type));
        s.params.push(AbiParam::new(pointer_type));
        s.returns.push(AbiParam::new(types::I32));
        s
    };
    // jit_next_event(ctx, raw_event_out) -> i32.
    let sig_next_event = {
        let mut s = make_c_sig(module);
        s.params.push(AbiParam::new(pointer_type));
        s.params.push(AbiParam::new(pointer_type));
        s.returns.push(AbiParam::new(types::I32));
        s
    };
    let sig_peek_event = sig_next_event.clone();
    // jit_vec_init_with_capacity(out, capacity, init_fn).
    let sig_vec_init = {
        let mut s = make_c_sig(module);
        s.params.push(AbiParam::new(pointer_type));
        s.params.push(AbiParam::new(pointer_type));
        s.params.push(AbiParam::new(pointer_type));
        s
    };
    // jit_vec_push_<kind>(out, push_fn, value...); strings instead pass
    // (ptr, len, capacity, owned_flag).
    let sig_vec_push_scalar = {
        let mut s = make_c_sig(module);
        s.params.push(AbiParam::new(pointer_type));
        s.params.push(AbiParam::new(pointer_type));
        match elem_kind {
            ListElementKind::Bool => s.params.push(AbiParam::new(types::I8)),
            ListElementKind::I64 => s.params.push(AbiParam::new(types::I64)),
            ListElementKind::U64 => s.params.push(AbiParam::new(types::I64)),
            ListElementKind::F64 => s.params.push(AbiParam::new(types::F64)),
            ListElementKind::String => {
                s.params.push(AbiParam::new(pointer_type));
                s.params.push(AbiParam::new(pointer_type));
                s.params.push(AbiParam::new(pointer_type));
                s.params.push(AbiParam::new(types::I8));
            }
        }
        s
    };
    // Imported helpers, resolved via the symbols from `register_helpers`.
    let next_event_id = module
        .declare_function("jit_next_event", Linkage::Import, &sig_next_event)
        .ok()?;
    // NOTE(review): declared but never referenced below — presumably kept for
    // parity with the struct path; confirm whether it can be dropped.
    let _peek_event_id = module
        .declare_function("jit_peek_event", Linkage::Import, &sig_peek_event)
        .ok()?;
    let vec_init_id = module
        .declare_function("jit_vec_init_with_capacity", Linkage::Import, &sig_vec_init)
        .ok()?;
    // Pick the push helper specialized for the element kind.
    let push_fn_name = match elem_kind {
        ListElementKind::Bool => "jit_vec_push_bool",
        ListElementKind::I64 => "jit_vec_push_i64",
        ListElementKind::U64 => "jit_vec_push_u64",
        ListElementKind::F64 => "jit_vec_push_f64",
        ListElementKind::String => "jit_vec_push_string",
    };
    let vec_push_id = module
        .declare_function(push_fn_name, Linkage::Import, &sig_vec_push_scalar)
        .ok()?;
    let func_id = module
        .declare_function("jit_deserialize_list", Linkage::Local, &sig)
        .ok()?;
    let mut ctx = module.make_context();
    ctx.func.signature = sig;
    let mut builder_ctx = FunctionBuilderContext::new();
    {
        let mut builder = FunctionBuilder::new(&mut ctx.func, &mut builder_ctx);
        let next_event_ref = module.declare_func_in_func(next_event_id, builder.func);
        let vec_init_ref = module.declare_func_in_func(vec_init_id, builder.func);
        let vec_push_ref = module.declare_func_in_func(vec_push_id, builder.func);
        // CFG blocks; see the doc comment for the overall shape.
        let entry = builder.create_block();
        let check_array_start = builder.create_block();
        let init_vec = builder.create_block();
        let loop_peek = builder.create_block();
        let check_end = builder.create_block();
        let push_elem = builder.create_block();
        let success = builder.create_block();
        let error = builder.create_block();
        builder.append_block_params_for_function_params(entry);
        builder.switch_to_block(entry);
        let ctx_ptr = builder.block_params(entry)[0];
        let out_ptr = builder.block_params(entry)[1];
        // Stack slot that jit_next_event writes the RawEvent into.
        let raw_event_slot = builder.create_sized_stack_slot(StackSlotData::new(
            StackSlotKind::ExplicitSlot,
            helpers::RAW_EVENT_SIZE as u32,
            8,
        ));
        let raw_event_ptr = builder.ins().stack_addr(pointer_type, raw_event_slot, 0);
        // Bake the facet function pointers in as pointer-sized constants.
        let init_fn_ptr = builder
            .ins()
            .iconst(pointer_type, init_fn as *const () as i64);
        let push_fn_ptr = builder
            .ins()
            .iconst(pointer_type, push_fn as *const () as i64);
        let zero_cap = builder.ins().iconst(pointer_type, 0);
        // First event must arrive successfully and be ArrayStart.
        let call = builder
            .ins()
            .call(next_event_ref, &[ctx_ptr, raw_event_ptr]);
        let result = builder.inst_results(call)[0];
        let is_ok = builder.ins().icmp_imm(IntCC::Equal, result, 0);
        builder
            .ins()
            .brif(is_ok, check_array_start, &[], error, &[]);
        builder.seal_block(entry);
        builder.switch_to_block(check_array_start);
        let tag = builder.ins().load(
            types::I8,
            MemFlags::trusted(),
            raw_event_ptr,
            helpers::RAW_EVENT_TAG_OFFSET as i32,
        );
        let is_array_start =
            builder
                .ins()
                .icmp_imm(IntCC::Equal, tag, helpers::EventTag::ArrayStart as i64);
        builder
            .ins()
            .brif(is_array_start, init_vec, &[], error, &[]);
        builder.seal_block(check_array_start);
        // Initialize the output Vec (capacity 0) before pushing anything.
        builder.switch_to_block(init_vec);
        builder
            .ins()
            .call(vec_init_ref, &[out_ptr, zero_cap, init_fn_ptr]);
        builder.ins().jump(loop_peek, &[]);
        builder.seal_block(init_vec);
        // Loop head: pull the next event (an element value or ArrayEnd).
        builder.switch_to_block(loop_peek);
        let call = builder
            .ins()
            .call(next_event_ref, &[ctx_ptr, raw_event_ptr]);
        let result = builder.inst_results(call)[0];
        let is_ok = builder.ins().icmp_imm(IntCC::Equal, result, 0);
        builder.ins().brif(is_ok, check_end, &[], error, &[]);
        builder.switch_to_block(check_end);
        let tag = builder.ins().load(
            types::I8,
            MemFlags::trusted(),
            raw_event_ptr,
            helpers::RAW_EVENT_TAG_OFFSET as i32,
        );
        let is_end = builder
            .ins()
            .icmp_imm(IntCC::Equal, tag, helpers::EventTag::ArrayEnd as i64);
        builder.ins().brif(is_end, success, &[], push_elem, &[]);
        builder.seal_block(check_end);
        // Push one element: load the payload from the RawEvent slot and call
        // the kind-specialized push helper.
        builder.switch_to_block(push_elem);
        let payload_ptr = builder
            .ins()
            .iadd_imm(raw_event_ptr, helpers::RAW_EVENT_PAYLOAD_OFFSET as i64);
        match elem_kind {
            ListElementKind::Bool => {
                let val = builder
                    .ins()
                    .load(types::I8, MemFlags::trusted(), payload_ptr, 0);
                builder
                    .ins()
                    .call(vec_push_ref, &[out_ptr, push_fn_ptr, val]);
            }
            ListElementKind::I64 => {
                let val = builder
                    .ins()
                    .load(types::I64, MemFlags::trusted(), payload_ptr, 0);
                builder
                    .ins()
                    .call(vec_push_ref, &[out_ptr, push_fn_ptr, val]);
            }
            ListElementKind::U64 => {
                // Same 64-bit load as I64; signedness is handled by the helper.
                let val = builder
                    .ins()
                    .load(types::I64, MemFlags::trusted(), payload_ptr, 0);
                builder
                    .ins()
                    .call(vec_push_ref, &[out_ptr, push_fn_ptr, val]);
            }
            ListElementKind::F64 => {
                let val = builder
                    .ins()
                    .load(types::F64, MemFlags::trusted(), payload_ptr, 0);
                builder
                    .ins()
                    .call(vec_push_ref, &[out_ptr, push_fn_ptr, val]);
            }
            ListElementKind::String => {
                // Strings are passed piecewise: (ptr, len, capacity, owned).
                let str_ptr = builder.ins().load(
                    pointer_type,
                    MemFlags::trusted(),
                    payload_ptr,
                    helpers::STRING_PTR_OFFSET as i32,
                );
                let str_len = builder.ins().load(
                    pointer_type,
                    MemFlags::trusted(),
                    payload_ptr,
                    helpers::STRING_LEN_OFFSET as i32,
                );
                let str_cap = builder.ins().load(
                    pointer_type,
                    MemFlags::trusted(),
                    payload_ptr,
                    helpers::STRING_CAPACITY_OFFSET as i32,
                );
                let str_owned = builder.ins().load(
                    types::I8,
                    MemFlags::trusted(),
                    payload_ptr,
                    helpers::STRING_OWNED_OFFSET as i32,
                );
                builder.ins().call(
                    vec_push_ref,
                    &[out_ptr, push_fn_ptr, str_ptr, str_len, str_cap, str_owned],
                );
            }
        }
        builder.ins().jump(loop_peek, &[]);
        builder.seal_block(push_elem);
        // Both of loop_peek's predecessors (init_vec, push_elem) now exist.
        builder.seal_block(loop_peek);
        builder.switch_to_block(success);
        let zero = builder.ins().iconst(types::I32, 0);
        builder.ins().return_(&[zero]);
        builder.seal_block(success);
        builder.switch_to_block(error);
        // -10: generic list-deserialization failure code returned to the caller.
        let err = builder.ins().iconst(types::I32, -10);
        builder.ins().return_(&[err]);
        builder.seal_block(error);
        builder.finalize();
    }
    module.define_function(func_id, &mut ctx).ok()?;
    Some(func_id)
}
fn compile_deserializer(
module: &mut JITModule,
shape: &'static Shape,
) -> Option<(FuncId, Vec<JITModule>)> {
let FacetType::User(UserType::Struct(struct_def)) = &shape.ty else {
return None;
};
#[cfg(debug_assertions)]
{
eprintln!("[JIT DEBUG] ========================================");
eprintln!(
"[JIT DEBUG] Compiling deserializer for: {}",
shape.type_identifier
);
eprintln!("[JIT DEBUG] Shape pointer: {:p}", shape);
eprintln!("[JIT DEBUG] Shape.id (ConstTypeId): {:?}", shape.id);
eprintln!("[JIT DEBUG] Shape.layout: {:?}", shape.layout);
eprintln!("[JIT DEBUG] struct_def pointer: {:p}", struct_def);
eprintln!(
"[JIT DEBUG] struct_def.fields pointer: {:p}",
struct_def.fields.as_ptr()
);
eprintln!(
"[JIT DEBUG] struct_def.fields.len() = {}",
struct_def.fields.len()
);
eprintln!("[JIT DEBUG] Fields from Shape (struct_def.fields):");
for (i, f) in struct_def.fields.iter().enumerate() {
let field_ptr = f as *const facet_core::Field;
let field_shape = f.shape();
eprintln!(
"[JIT DEBUG] [{}] field_ptr={:p}, name='{}', offset={}",
i, field_ptr, f.name, f.offset
);
eprintln!(
"[JIT DEBUG] field_shape_ptr={:p}, field_shape_type='{}'",
field_shape as *const _, field_shape.type_identifier
);
}
if shape.type_identifier == "UserSparse" {
eprintln!("[JIT DEBUG] *** UserSparse detected - checking actual memory layout ***");
eprintln!(
"[JIT DEBUG] sizeof(UserSparse shape) struct_def layout says: {:?}",
shape.layout
);
eprintln!("[JIT DEBUG] struct_def.repr: {:?}", struct_def.repr);
eprintln!("[JIT DEBUG] struct_def.kind: {:?}", struct_def.kind);
}
eprintln!("[JIT DEBUG] ----------------------------------------");
}
let mut required_bit_counter = 0usize;
let fields: Vec<FieldCodegenInfo> = struct_def
.fields
.iter()
.filter_map(|f| {
let write_kind = WriteKind::from_shape(f.shape())?;
let is_required = !matches!(write_kind, WriteKind::Option(_));
let required_bit_index = if is_required {
let idx = required_bit_counter;
required_bit_counter += 1;
Some(idx)
} else {
None
};
Some(FieldCodegenInfo {
name: f.name,
offset: f.offset,
write_kind,
required_bit_index,
})
})
.collect();
#[cfg(debug_assertions)]
{
eprintln!(
"[JIT DEBUG] Extracted FieldCodegenInfo ({} fields):",
fields.len()
);
for (i, f) in fields.iter().enumerate() {
eprintln!(
"[JIT DEBUG] [{}] name='{}', offset={}",
i, f.name, f.offset
);
}
eprintln!("[JIT DEBUG] ========================================");
}
if required_bit_counter >= 64 {
jit_debug!(
"[Tier-2 JIT] Too many required fields ({} >= 64, max 63 for u64 bitmask)",
required_bit_counter
);
return None;
}
let required_fields_mask: u64 = if required_bit_counter > 0 {
(1u64 << required_bit_counter) - 1
} else {
0
};
let mut nested_lookup: std::collections::HashMap<*const Shape, *const u8> =
std::collections::HashMap::new();
let mut all_nested_modules: Vec<JITModule> = Vec::new();
let mut compile_and_cache = |shape: &'static Shape| -> Option<*const u8> {
#[cfg(debug_assertions)]
eprintln!(
"[JIT DEBUG] compile_and_cache called for nested shape: {} at {:p}",
shape.type_identifier, shape
);
let ptr = shape as *const Shape;
if let std::collections::hash_map::Entry::Vacant(e) = nested_lookup.entry(ptr) {
#[cfg(debug_assertions)]
eprintln!("[JIT DEBUG] -> compiling nested shape (not in cache)");
let mut nested_builder =
JITBuilder::new(cranelift_module::default_libcall_names()).ok()?;
register_helpers(&mut nested_builder);
let mut nested_module = JITModule::new(nested_builder);
let (nested_func_id, sub_nested_modules) =
compile_deserializer(&mut nested_module, shape)?;
nested_module.finalize_definitions().ok()?;
let fn_ptr = nested_module.get_finalized_function(nested_func_id);
e.insert(fn_ptr);
all_nested_modules.push(nested_module);
all_nested_modules.extend(sub_nested_modules);
Some(fn_ptr)
} else {
#[cfg(debug_assertions)]
eprintln!("[JIT DEBUG] -> using cached nested shape");
Some(nested_lookup[&ptr])
}
};
for field in &fields {
match field.write_kind {
WriteKind::NestedStruct(nested_shape) => {
compile_and_cache(nested_shape);
}
WriteKind::Option(option_shape) => {
if let Def::Option(option_def) = &option_shape.def {
let inner_shape = option_def.t;
if let FacetType::User(UserType::Struct(_)) = &inner_shape.ty
&& is_jit_compatible(inner_shape)
{
compile_and_cache(inner_shape);
}
}
}
WriteKind::Vec(vec_shape) => {
if let Def::List(list_def) = &vec_shape.def {
let elem_shape = list_def.t;
if let FacetType::User(UserType::Struct(_)) = &elem_shape.ty
&& is_jit_compatible(elem_shape)
{
compile_and_cache(elem_shape);
}
}
}
_ => {}
}
}
let pointer_type = module.target_config().pointer_type();
let mut sig = make_c_sig(module);
sig.params.push(AbiParam::new(pointer_type)); sig.params.push(AbiParam::new(pointer_type)); sig.returns.push(AbiParam::new(types::I32));
static COUNTER: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(0);
let func_name = format!(
"jit_deserialize_{}",
COUNTER.fetch_add(1, std::sync::atomic::Ordering::SeqCst)
);
let func_id = module
.declare_function(&func_name, Linkage::Local, &sig)
.ok()?;
let sig_next_event = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.returns.push(AbiParam::new(types::I32)); s
};
let sig_peek_event = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.returns.push(AbiParam::new(types::I32)); s
};
let sig_skip_value = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.returns.push(AbiParam::new(types::I32)); s
};
let sig_field_matches = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.returns.push(AbiParam::new(types::I32)); s
};
let sig_write_i64 = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(types::I64)); s
};
let sig_write_u64 = sig_write_i64.clone();
let sig_write_i8 = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(types::I8)); s
};
let sig_write_i16 = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(types::I16)); s
};
let sig_write_i32 = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(types::I32)); s
};
let sig_write_u8 = sig_write_i8.clone();
let sig_write_u16 = sig_write_i16.clone();
let sig_write_u32 = sig_write_i32.clone();
let sig_write_f64 = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(types::F64)); s
};
let sig_write_bool = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(types::I8)); s
};
let sig_write_string = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(types::I8)); s
};
let sig_deserialize_nested = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.returns.push(AbiParam::new(types::I32)); s
};
let sig_option_init_none = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s
};
let sig_option_init_some_from_value = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s
};
let sig_vec_init_with_capacity = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s
};
let sig_vec_push = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.returns.push(AbiParam::new(types::I32)); s
};
let sig_deserialize_vec = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(types::I8)); s.returns.push(AbiParam::new(types::I32)); s
};
let sig_deserialize_list_by_shape = {
let mut s = make_c_sig(module);
s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.params.push(AbiParam::new(pointer_type)); s.returns.push(AbiParam::new(types::I32)); s
};
let field_matches_id = module
.declare_function("jit_field_matches", Linkage::Import, &sig_field_matches)
.ok()?;
let write_i64_id = module
.declare_function("jit_write_i64", Linkage::Import, &sig_write_i64)
.ok()?;
let write_u64_id = module
.declare_function("jit_write_u64", Linkage::Import, &sig_write_u64)
.ok()?;
let write_i8_id = module
.declare_function("jit_write_i8", Linkage::Import, &sig_write_i8)
.ok()?;
let write_i16_id = module
.declare_function("jit_write_i16", Linkage::Import, &sig_write_i16)
.ok()?;
let write_i32_id = module
.declare_function("jit_write_i32", Linkage::Import, &sig_write_i32)
.ok()?;
let write_u8_id = module
.declare_function("jit_write_u8", Linkage::Import, &sig_write_u8)
.ok()?;
let write_u16_id = module
.declare_function("jit_write_u16", Linkage::Import, &sig_write_u16)
.ok()?;
let write_u32_id = module
.declare_function("jit_write_u32", Linkage::Import, &sig_write_u32)
.ok()?;
let write_f64_id = module
.declare_function("jit_write_f64", Linkage::Import, &sig_write_f64)
.ok()?;
let write_bool_id = module
.declare_function("jit_write_bool", Linkage::Import, &sig_write_bool)
.ok()?;
let write_string_id = module
.declare_function("jit_write_string", Linkage::Import, &sig_write_string)
.ok()?;
let deserialize_nested_id = module
.declare_function(
"jit_deserialize_nested",
Linkage::Import,
&sig_deserialize_nested,
)
.ok()?;
let peek_event_id = module
.declare_function("jit_peek_event", Linkage::Import, &sig_peek_event)
.ok()?;
let next_event_id = module
.declare_function("jit_next_event", Linkage::Import, &sig_next_event)
.ok()?;
let option_init_none_id = module
.declare_function(
"jit_option_init_none",
Linkage::Import,
&sig_option_init_none,
)
.ok()?;
let option_init_some_from_value_id = module
.declare_function(
"jit_option_init_some_from_value",
Linkage::Import,
&sig_option_init_some_from_value,
)
.ok()?;
let vec_init_with_capacity_id = module
.declare_function(
"jit_vec_init_with_capacity",
Linkage::Import,
&sig_vec_init_with_capacity,
)
.ok()?;
let vec_push_id = module
.declare_function("jit_vec_push", Linkage::Import, &sig_vec_push)
.ok()?;
let deserialize_vec_id = module
.declare_function("jit_deserialize_vec", Linkage::Import, &sig_deserialize_vec)
.ok()?;
let deserialize_list_by_shape_id = module
.declare_function(
"jit_deserialize_list_by_shape",
Linkage::Import,
&sig_deserialize_list_by_shape,
)
.ok()?;
let mut ctx = module.make_context();
ctx.func.signature = sig;
let mut builder_ctx = FunctionBuilderContext::new();
{
let mut builder = FunctionBuilder::new(&mut ctx.func, &mut builder_ctx);
let field_matches_ref = module.declare_func_in_func(field_matches_id, builder.func);
let write_i64_ref = module.declare_func_in_func(write_i64_id, builder.func);
let write_u64_ref = module.declare_func_in_func(write_u64_id, builder.func);
let write_i8_ref = module.declare_func_in_func(write_i8_id, builder.func);
let write_i16_ref = module.declare_func_in_func(write_i16_id, builder.func);
let write_i32_ref = module.declare_func_in_func(write_i32_id, builder.func);
let write_u8_ref = module.declare_func_in_func(write_u8_id, builder.func);
let write_u16_ref = module.declare_func_in_func(write_u16_id, builder.func);
let write_u32_ref = module.declare_func_in_func(write_u32_id, builder.func);
let write_f64_ref = module.declare_func_in_func(write_f64_id, builder.func);
let write_bool_ref = module.declare_func_in_func(write_bool_id, builder.func);
let write_string_ref = module.declare_func_in_func(write_string_id, builder.func);
let deserialize_nested_ref =
module.declare_func_in_func(deserialize_nested_id, builder.func);
let peek_event_ref = module.declare_func_in_func(peek_event_id, builder.func);
let next_event_ref = module.declare_func_in_func(next_event_id, builder.func);
let option_init_none_ref = module.declare_func_in_func(option_init_none_id, builder.func);
let option_init_some_from_value_ref =
module.declare_func_in_func(option_init_some_from_value_id, builder.func);
let _vec_init_with_capacity_ref =
module.declare_func_in_func(vec_init_with_capacity_id, builder.func);
let _vec_push_ref = module.declare_func_in_func(vec_push_id, builder.func);
let _deserialize_vec_ref = module.declare_func_in_func(deserialize_vec_id, builder.func);
let deserialize_list_by_shape_ref =
module.declare_func_in_func(deserialize_list_by_shape_id, builder.func);
let entry_block = builder.create_block();
builder.append_block_params_for_function_params(entry_block);
builder.switch_to_block(entry_block);
builder.seal_block(entry_block);
let ctx_ptr = builder.block_params(entry_block)[0];
let out_ptr = builder.block_params(entry_block)[1];
let required_fields_seen = builder.declare_var(types::I64);
let zero_i64 = builder.ins().iconst(types::I64, 0);
builder.def_var(required_fields_seen, zero_i64);
let raw_event_slot = builder.create_sized_stack_slot(StackSlotData::new(
StackSlotKind::ExplicitSlot,
helpers::RAW_EVENT_SIZE as u32,
8, ));
let raw_event_ptr = builder.ins().stack_addr(pointer_type, raw_event_slot, 0);
let parser_ptr = builder.ins().load(
pointer_type,
MemFlags::trusted(),
ctx_ptr,
helpers::JIT_CONTEXT_PARSER_OFFSET as i32,
);
let vtable_ptr = builder.ins().load(
pointer_type,
MemFlags::trusted(),
ctx_ptr,
helpers::JIT_CONTEXT_VTABLE_OFFSET as i32,
);
let skip_value_fn = builder.ins().load(
pointer_type,
MemFlags::trusted(),
vtable_ptr,
helpers::VTABLE_SKIP_VALUE_OFFSET as i32,
);
let error_block = builder.create_block();
let success_block = builder.create_block();
let field_loop = builder.create_block();
let sig_skip_value_ref = builder.import_signature(sig_skip_value.clone());
let call_result = builder
.ins()
.call(next_event_ref, &[ctx_ptr, raw_event_ptr]);
let result = builder.inst_results(call_result)[0];
let is_error = builder.ins().icmp_imm(IntCC::SignedLessThan, result, 0);
let check_struct_start = builder.create_block();
builder
.ins()
.brif(is_error, error_block, &[], check_struct_start, &[]);
builder.switch_to_block(check_struct_start);
let tag = builder.ins().load(
types::I8,
MemFlags::trusted(),
raw_event_ptr,
helpers::RAW_EVENT_TAG_OFFSET as i32,
);
let is_struct_start = builder.ins().icmp_imm(IntCC::Equal, tag, 0);
let init_options_block = builder.create_block();
builder
.ins()
.brif(is_struct_start, init_options_block, &[], error_block, &[]);
builder.switch_to_block(init_options_block);
for field in &fields {
if let WriteKind::Option(option_shape) = field.write_kind {
let Def::Option(option_def) = &option_shape.def else {
continue;
};
let init_none_fn_ptr = option_def.vtable.init_none as *const u8;
let init_none_fn_val = builder.ins().iconst(pointer_type, init_none_fn_ptr as i64);
let offset_val = builder.ins().iconst(pointer_type, field.offset as i64);
let field_ptr = builder.ins().iadd(out_ptr, offset_val);
builder
.ins()
.call(option_init_none_ref, &[field_ptr, init_none_fn_val]);
}
}
builder.ins().jump(field_loop, &[]);
builder.seal_block(init_options_block);
builder.switch_to_block(field_loop);
let call_result = builder
.ins()
.call(next_event_ref, &[ctx_ptr, raw_event_ptr]);
let result = builder.inst_results(call_result)[0];
let is_error = builder.ins().icmp_imm(IntCC::SignedLessThan, result, 0);
let check_event_tag = builder.create_block();
builder
.ins()
.brif(is_error, error_block, &[], check_event_tag, &[]);
builder.switch_to_block(check_event_tag);
let tag = builder.ins().load(
types::I8,
MemFlags::trusted(),
raw_event_ptr,
helpers::RAW_EVENT_TAG_OFFSET as i32,
);
let is_struct_end = builder.ins().icmp_imm(IntCC::Equal, tag, 1); let check_field_key = builder.create_block();
builder
.ins()
.brif(is_struct_end, success_block, &[], check_field_key, &[]);
builder.switch_to_block(check_field_key);
let is_field_key = builder.ins().icmp_imm(IntCC::Equal, tag, 4); let process_field = builder.create_block();
builder
.ins()
.brif(is_field_key, process_field, &[], error_block, &[]);
builder.switch_to_block(process_field);
let payload_ptr = builder
.ins()
.iadd_imm(raw_event_ptr, helpers::RAW_EVENT_PAYLOAD_OFFSET as i64);
let name_ptr = builder.ins().load(
pointer_type,
MemFlags::trusted(),
payload_ptr,
helpers::FIELD_NAME_PTR_OFFSET as i32,
);
let name_len = builder.ins().load(
pointer_type,
MemFlags::trusted(),
payload_ptr,
helpers::FIELD_NAME_LEN_OFFSET as i32,
);
let field_blocks: Vec<Block> = fields.iter().map(|_| builder.create_block()).collect();
let skip_field_block = builder.create_block();
let after_field = builder.create_block();
let set_bit_blocks: Vec<Option<Block>> = fields
.iter()
.map(|f| {
if f.required_bit_index.is_some() {
Some(builder.create_block())
} else {
None
}
})
.collect();
let mut compare_blocks: Vec<Block> = Vec::new();
for (i, field) in fields.iter().enumerate() {
let next_compare = if i + 1 < fields.len() {
let block = builder.create_block();
compare_blocks.push(block);
block
} else {
skip_field_block
};
let expected_ptr = builder
.ins()
.iconst(pointer_type, field.name.as_ptr() as i64);
let expected_len = builder.ins().iconst(pointer_type, field.name.len() as i64);
let call_result = builder.ins().call(
field_matches_ref,
&[name_ptr, name_len, expected_ptr, expected_len],
);
let matches = builder.inst_results(call_result)[0];
let is_match = builder.ins().icmp_imm(IntCC::NotEqual, matches, 0);
builder
.ins()
.brif(is_match, field_blocks[i], &[], next_compare, &[]);
if i + 1 < fields.len() {
builder.switch_to_block(next_compare);
}
}
builder.switch_to_block(skip_field_block);
let call_result =
builder
.ins()
.call_indirect(sig_skip_value_ref, skip_value_fn, &[parser_ptr]);
let result = builder.inst_results(call_result)[0];
let is_error = builder.ins().icmp_imm(IntCC::SignedLessThan, result, 0);
builder
.ins()
.brif(is_error, error_block, &[], after_field, &[]);
for (i, field) in fields.iter().enumerate() {
builder.switch_to_block(field_blocks[i]);
let offset_val = builder.ins().iconst(pointer_type, field.offset as i64);
#[cfg(debug_assertions)]
eprintln!(
"[JIT COMPILE] Field '{}' at offset {}",
field.name, field.offset
);
let continue_target = set_bit_blocks[i].unwrap_or(after_field);
match field.write_kind {
WriteKind::NestedStruct(nested_shape) => {
let func_ptr = nested_lookup[&(nested_shape as *const Shape)];
let func_ptr_val = builder.ins().iconst(pointer_type, func_ptr as i64);
let nested_out_ptr = builder.ins().iadd(out_ptr, offset_val);
let call_result = builder.ins().call(
deserialize_nested_ref,
&[ctx_ptr, nested_out_ptr, func_ptr_val],
);
let nested_result = builder.inst_results(call_result)[0];
let nested_is_error =
builder
.ins()
.icmp_imm(IntCC::SignedLessThan, nested_result, 0);
builder
.ins()
.brif(nested_is_error, error_block, &[], continue_target, &[]);
}
WriteKind::Option(option_shape) => {
let call_result = builder
.ins()
.call(peek_event_ref, &[ctx_ptr, raw_event_ptr]);
let result = builder.inst_results(call_result)[0];
let is_error = builder.ins().icmp_imm(IntCC::SignedLessThan, result, 0);
let check_null_block = builder.create_block();
builder
.ins()
.brif(is_error, error_block, &[], check_null_block, &[]);
builder.switch_to_block(check_null_block);
let scalar_tag = builder.ins().load(
types::I8,
MemFlags::trusted(),
raw_event_ptr,
1, );
let is_null = builder.ins().icmp_imm(IntCC::Equal, scalar_tag, 1);
let handle_none_block = builder.create_block();
let handle_some_block = builder.create_block();
builder
.ins()
.brif(is_null, handle_none_block, &[], handle_some_block, &[]);
builder.switch_to_block(handle_none_block);
let _consume_result = builder
.ins()
.call(next_event_ref, &[ctx_ptr, raw_event_ptr]);
let Def::Option(option_def) = &option_shape.def else {
unreachable!();
};
let init_none_fn_ptr = option_def.vtable.init_none as *const u8;
let init_none_fn_val =
builder.ins().iconst(pointer_type, init_none_fn_ptr as i64);
let field_ptr = builder.ins().iadd(out_ptr, offset_val);
builder
.ins()
.call(option_init_none_ref, &[field_ptr, init_none_fn_val]);
builder.ins().jump(after_field, &[]);
builder.switch_to_block(handle_some_block);
let Def::Option(option_def) = &option_shape.def else {
unreachable!();
};
let inner_shape = option_def.t;
let inner_write_kind = WriteKind::from_shape(inner_shape);
if inner_write_kind.is_none() {
builder.ins().jump(error_block, &[]);
builder.seal_block(check_null_block);
builder.seal_block(handle_some_block);
continue;
}
let inner_write_kind = inner_write_kind.unwrap();
let value_slot = builder.create_sized_stack_slot(StackSlotData::new(
StackSlotKind::ExplicitSlot,
256,
8,
));
let value_ptr = builder.ins().stack_addr(pointer_type, value_slot, 0);
let value_offset = builder.ins().iconst(pointer_type, 0);
let zero_i64 = builder.ins().iconst(types::I64, 0);
for offset in (0..256).step_by(8) {
builder
.ins()
.store(MemFlags::trusted(), zero_i64, value_ptr, offset);
}
let call_result = builder
.ins()
.call(next_event_ref, &[ctx_ptr, raw_event_ptr]);
let result = builder.inst_results(call_result)[0];
let is_error = builder.ins().icmp_imm(IntCC::SignedLessThan, result, 0);
let write_value_block = builder.create_block();
builder
.ins()
.brif(is_error, error_block, &[], write_value_block, &[]);
builder.switch_to_block(write_value_block);
let payload_ptr = builder
.ins()
.iadd_imm(raw_event_ptr, helpers::RAW_EVENT_PAYLOAD_OFFSET as i64);
match inner_write_kind {
WriteKind::I8 => {
let value =
builder
.ins()
.load(types::I8, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_i8_ref, &[value_ptr, value_offset, value]);
}
WriteKind::I16 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
let value_i16 = builder.ins().ireduce(types::I16, value);
builder
.ins()
.call(write_i16_ref, &[value_ptr, value_offset, value_i16]);
}
WriteKind::I32 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
let value_i32 = builder.ins().ireduce(types::I32, value);
builder
.ins()
.call(write_i32_ref, &[value_ptr, value_offset, value_i32]);
}
WriteKind::I64 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_i64_ref, &[value_ptr, value_offset, value]);
}
WriteKind::U8 => {
let value =
builder
.ins()
.load(types::I8, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_u8_ref, &[value_ptr, value_offset, value]);
}
WriteKind::U16 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
let value_u16 = builder.ins().ireduce(types::I16, value);
builder
.ins()
.call(write_u16_ref, &[value_ptr, value_offset, value_u16]);
}
WriteKind::U32 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
let value_u32 = builder.ins().ireduce(types::I32, value);
builder
.ins()
.call(write_u32_ref, &[value_ptr, value_offset, value_u32]);
}
WriteKind::U64 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_u64_ref, &[value_ptr, value_offset, value]);
}
WriteKind::F64 | WriteKind::F32 => {
let value =
builder
.ins()
.load(types::F64, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_f64_ref, &[value_ptr, value_offset, value]);
}
WriteKind::Bool => {
let value =
builder
.ins()
.load(types::I8, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_bool_ref, &[value_ptr, value_offset, value]);
}
WriteKind::String => {
let str_ptr = builder.ins().load(
pointer_type,
MemFlags::trusted(),
payload_ptr,
0,
);
let str_len = builder.ins().load(
pointer_type,
MemFlags::trusted(),
payload_ptr,
8,
);
let str_capacity = builder.ins().load(
pointer_type,
MemFlags::trusted(),
payload_ptr,
16,
);
let str_owned =
builder
.ins()
.load(types::I8, MemFlags::trusted(), payload_ptr, 24);
builder.ins().call(
write_string_ref,
&[
value_ptr,
value_offset,
str_ptr,
str_len,
str_capacity,
str_owned,
],
);
}
WriteKind::NestedStruct(_) | WriteKind::Option(_) | WriteKind::Vec(_) => {
builder.ins().jump(error_block, &[]);
builder.seal_block(check_null_block);
builder.seal_block(handle_some_block);
builder.seal_block(write_value_block);
continue;
}
}
let init_some_fn_ptr = option_def.vtable.init_some as *const u8;
let init_some_fn_val =
builder.ins().iconst(pointer_type, init_some_fn_ptr as i64);
let field_ptr = builder.ins().iadd(out_ptr, offset_val);
builder.ins().call(
option_init_some_from_value_ref,
&[field_ptr, value_ptr, init_some_fn_val],
);
builder.ins().jump(after_field, &[]);
builder.seal_block(check_null_block);
builder.seal_block(handle_none_block);
builder.seal_block(handle_some_block);
builder.seal_block(write_value_block);
}
WriteKind::Vec(vec_shape) => {
let vec_shape_ptr = vec_shape as *const Shape;
let vec_shape_val = builder.ins().iconst(pointer_type, vec_shape_ptr as i64);
let field_ptr = builder.ins().iadd(out_ptr, offset_val);
let elem_deserializer_val = if let Def::List(list_def) = &vec_shape.def {
let elem_shape = list_def.t;
if let FacetType::User(UserType::Struct(_)) = &elem_shape.ty {
if let Some(&func_ptr) =
nested_lookup.get(&(elem_shape as *const Shape))
{
builder.ins().iconst(pointer_type, func_ptr as i64)
} else {
builder.ins().iconst(pointer_type, 0)
}
} else {
builder.ins().iconst(pointer_type, 0)
}
} else {
builder.ins().iconst(pointer_type, 0)
};
let call_result = builder.ins().call(
deserialize_list_by_shape_ref,
&[ctx_ptr, field_ptr, vec_shape_val, elem_deserializer_val],
);
let vec_result = builder.inst_results(call_result)[0];
let vec_is_error = builder.ins().icmp_imm(IntCC::SignedLessThan, vec_result, 0);
builder
.ins()
.brif(vec_is_error, error_block, &[], continue_target, &[]);
}
_ => {
let call_result = builder
.ins()
.call(next_event_ref, &[ctx_ptr, raw_event_ptr]);
let result = builder.inst_results(call_result)[0];
let is_error = builder.ins().icmp_imm(IntCC::SignedLessThan, result, 0);
let write_value_block = builder.create_block();
builder
.ins()
.brif(is_error, error_block, &[], write_value_block, &[]);
builder.switch_to_block(write_value_block);
let payload_ptr = builder
.ins()
.iadd_imm(raw_event_ptr, helpers::RAW_EVENT_PAYLOAD_OFFSET as i64);
match field.write_kind {
WriteKind::I8 => {
let value =
builder
.ins()
.load(types::I8, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_i8_ref, &[out_ptr, offset_val, value]);
}
WriteKind::I16 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
let value_i16 = builder.ins().ireduce(types::I16, value);
builder
.ins()
.call(write_i16_ref, &[out_ptr, offset_val, value_i16]);
}
WriteKind::I32 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
let value_i32 = builder.ins().ireduce(types::I32, value);
builder
.ins()
.call(write_i32_ref, &[out_ptr, offset_val, value_i32]);
}
WriteKind::I64 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_i64_ref, &[out_ptr, offset_val, value]);
}
WriteKind::U8 => {
let value =
builder
.ins()
.load(types::I8, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_u8_ref, &[out_ptr, offset_val, value]);
}
WriteKind::U16 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
let value_u16 = builder.ins().ireduce(types::I16, value);
builder
.ins()
.call(write_u16_ref, &[out_ptr, offset_val, value_u16]);
}
WriteKind::U32 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
let value_u32 = builder.ins().ireduce(types::I32, value);
builder
.ins()
.call(write_u32_ref, &[out_ptr, offset_val, value_u32]);
}
WriteKind::U64 => {
let value =
builder
.ins()
.load(types::I64, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_u64_ref, &[out_ptr, offset_val, value]);
}
WriteKind::F64 | WriteKind::F32 => {
let value =
builder
.ins()
.load(types::F64, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_f64_ref, &[out_ptr, offset_val, value]);
}
WriteKind::Bool => {
let value =
builder
.ins()
.load(types::I8, MemFlags::trusted(), payload_ptr, 0);
builder
.ins()
.call(write_bool_ref, &[out_ptr, offset_val, value]);
}
WriteKind::String => {
let str_ptr = builder.ins().load(
pointer_type,
MemFlags::trusted(),
payload_ptr,
0,
);
let str_len = builder.ins().load(
pointer_type,
MemFlags::trusted(),
payload_ptr,
8, );
let str_capacity = builder.ins().load(
pointer_type,
MemFlags::trusted(),
payload_ptr,
16, );
let str_owned = builder.ins().load(
types::I8,
MemFlags::trusted(),
payload_ptr,
24, );
builder.ins().call(
write_string_ref,
&[
out_ptr,
offset_val,
str_ptr,
str_len,
str_capacity,
str_owned,
],
);
}
WriteKind::NestedStruct(_) => {
unreachable!("Nested struct should be handled in outer match");
}
WriteKind::Option(_) => {
unreachable!("Option should be handled in outer match");
}
WriteKind::Vec(_) => {
unreachable!("Vec should be handled in outer match");
}
}
builder.ins().jump(continue_target, &[]);
builder.seal_block(write_value_block);
}
}
}
for (i, field) in fields.iter().enumerate() {
if let Some(bit_index) = field.required_bit_index {
let set_bit_block = set_bit_blocks[i].unwrap();
builder.switch_to_block(set_bit_block);
let current = builder.use_var(required_fields_seen);
let bit = builder.ins().iconst(types::I64, 1i64 << bit_index);
let updated = builder.ins().bor(current, bit);
builder.def_var(required_fields_seen, updated);
builder.ins().jump(after_field, &[]);
}
}
builder.switch_to_block(after_field);
builder.ins().jump(field_loop, &[]);
builder.switch_to_block(success_block);
if required_fields_mask != 0 {
let seen = builder.use_var(required_fields_seen);
let expected = builder
.ins()
.iconst(types::I64, required_fields_mask as i64);
let all_seen = builder.ins().icmp(IntCC::Equal, seen, expected);
let return_success = builder.create_block();
let missing_field_error = builder.create_block();
builder
.ins()
.brif(all_seen, return_success, &[], missing_field_error, &[]);
builder.switch_to_block(missing_field_error);
let seen_for_error = builder.use_var(required_fields_seen);
builder.ins().store(
MemFlags::trusted(),
seen_for_error,
ctx_ptr,
helpers::JIT_CONTEXT_FIELDS_SEEN_OFFSET as i32,
);
let err_missing = builder
.ins()
.iconst(types::I32, helpers::ERR_MISSING_REQUIRED_FIELD as i64);
builder.ins().return_(&[err_missing]);
builder.switch_to_block(return_success);
let zero = builder.ins().iconst(types::I32, 0);
builder.ins().return_(&[zero]);
builder.seal_block(return_success);
builder.seal_block(missing_field_error);
} else {
let zero = builder.ins().iconst(types::I32, 0);
builder.ins().return_(&[zero]);
}
builder.switch_to_block(error_block);
let seen_for_cleanup = builder.use_var(required_fields_seen);
builder.ins().store(
MemFlags::trusted(),
seen_for_cleanup,
ctx_ptr,
helpers::JIT_CONTEXT_FIELDS_SEEN_OFFSET as i32,
);
let err = builder.ins().iconst(types::I32, -1);
builder.ins().return_(&[err]);
builder.seal_block(check_struct_start);
builder.seal_block(field_loop);
builder.seal_block(check_event_tag);
builder.seal_block(check_field_key);
builder.seal_block(process_field);
builder.seal_block(skip_field_block);
builder.seal_block(after_field);
builder.seal_block(success_block);
builder.seal_block(error_block);
for block in &field_blocks {
builder.seal_block(*block);
}
for block in &compare_blocks {
builder.seal_block(*block);
}
for block in set_bit_blocks.iter().flatten() {
builder.seal_block(*block);
}
builder.finalize();
}
module.define_function(func_id, &mut ctx).ok()?;
Some((func_id, all_nested_modules))
}
/// Per-field metadata collected before codegen for a JIT-compiled struct
/// deserializer.
#[allow(dead_code)]
struct FieldCodegenInfo {
    /// Field name as it appears in the input; compared against incoming
    /// field-key events to dispatch to this field's write block.
    name: &'static str,
    /// Byte offset of the field inside the output struct; added to the
    /// output pointer before the deserialized value is written.
    offset: usize,
    /// Strategy used to write the deserialized value into the field.
    write_kind: WriteKind,
    /// Bit position in the required-fields bitmask when this field is
    /// required, or `None` when it is not tracked as required.
    required_bit_index: Option<usize>,
}
/// Write strategy the JIT compiler selects for a field's type.
///
/// Scalar variants copy the value straight out of the event payload via the
/// corresponding `write_*` helper; the payload-carrying variants keep the
/// `Shape` needed by the more involved codegen paths.
#[allow(dead_code)]
enum WriteKind {
    U8,
    U16,
    U32,
    U64,
    I8,
    I16,
    I32,
    I64,
    F32,
    F64,
    Bool,
    String,
    /// A nested struct; codegen calls a separately compiled deserializer
    /// function looked up by this shape.
    NestedStruct(&'static Shape),
    /// An `Option<T>`; the shape is the Option shape itself — the inner
    /// type is resolved during codegen via `Def::Option`.
    Option(&'static Shape),
    /// A list/`Vec`; the shape is the list shape, deserialized by shape at
    /// runtime (with an optional element deserializer for struct elements).
    Vec(&'static Shape),
}
#[allow(dead_code)]
impl WriteKind {
    /// Maps a reflected `Shape` onto the write strategy the JIT compiler
    /// should use for a field of that shape, or `None` when the shape is
    /// not supported by the compiled fast path.
    fn from_shape(shape: &'static Shape) -> Option<Self> {
        use facet_core::ScalarType;

        // Supported scalars map one-to-one onto a direct write strategy.
        if let Some(scalar_type) = shape.scalar_type() {
            let kind = match scalar_type {
                ScalarType::Bool => WriteKind::Bool,
                ScalarType::U8 => WriteKind::U8,
                ScalarType::U16 => WriteKind::U16,
                ScalarType::U32 => WriteKind::U32,
                ScalarType::U64 => WriteKind::U64,
                ScalarType::I8 => WriteKind::I8,
                ScalarType::I16 => WriteKind::I16,
                ScalarType::I32 => WriteKind::I32,
                ScalarType::I64 => WriteKind::I64,
                ScalarType::F32 => WriteKind::F32,
                ScalarType::F64 => WriteKind::F64,
                ScalarType::String => WriteKind::String,
                // Any other scalar kind is unsupported.
                _ => return None,
            };
            return Some(kind);
        }

        match &shape.def {
            // Options carry their own shape; the inner type is resolved
            // later during codegen.
            Def::Option(_) => Some(WriteKind::Option(shape)),
            // Lists are only supported when their element type is.
            Def::List(list_def) if is_vec_element_supported(list_def.t) => {
                Some(WriteKind::Vec(shape))
            }
            Def::List(_) => None,
            _ => match &shape.ty {
                // Nested structs must themselves be JIT-compatible.
                FacetType::User(UserType::Struct(_)) if is_jit_compatible(shape) => {
                    Some(WriteKind::NestedStruct(shape))
                }
                _ => None,
            },
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Bare scalars are not valid top-level JIT deserialization targets;
    /// only structs qualify.
    #[test]
    fn test_jit_compatibility_primitives() {
        let non_targets = [i64::SHAPE, String::SHAPE, bool::SHAPE];
        for shape in non_targets {
            assert!(!is_jit_compatible(shape));
        }
    }

    /// Scalars, strings, and arbitrarily nested Vecs of supported elements
    /// are all accepted as Vec element types.
    #[test]
    fn test_vec_element_supported() {
        let supported = [
            i64::SHAPE,
            f64::SHAPE,
            bool::SHAPE,
            String::SHAPE,
            <Vec<f64>>::SHAPE,
            <Vec<Vec<f64>>>::SHAPE,
            <Vec<Vec<Vec<f64>>>>::SHAPE,
        ];
        for shape in supported {
            assert!(is_vec_element_supported(shape));
        }
    }
}