freenet 0.2.26

Freenet core software
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
use super::{
    RuntimeResult,
    contract_store::ContractStore,
    delegate_api::DelegateApiVersion,
    delegate_store::DelegateStore,
    engine::{BackendEngine, Engine, InstanceHandle, WasmEngine},
    error::RuntimeInnerError,
    native_api,
    secrets_store::SecretsStore,
};
use freenet_stdlib::{
    memory::{
        WasmLinearMem,
        buf::{BufferBuilder, BufferMut},
    },
    prelude::*,
};
use lru::LruCache;
use std::sync::{Arc, Mutex};
use std::{num::NonZeroUsize, sync::atomic::AtomicI64};

/// A compiled WASM module cache shared across multiple `Runtime` instances.
///
/// Wasmer `Module` wraps `Arc<Artifact>`, so clones are cheap (just an Arc
/// refcount bump). Sharing the cache across the `RuntimePool` avoids compiling
/// and storing the same contract N times (once per pool executor).
// NOTE(review): this comment says Wasmer, but the `Runtime::engine` field below
// is documented as "(wasmtime)" — confirm which backend `Engine` actually wraps
// and align the two comments.
pub(crate) type SharedModuleCache<K> = Arc<Mutex<LruCache<K, <Engine as WasmEngine>::Module>>>;

/// Process-wide monotonic counter handing out unique ids to `RunningInstance::new`.
static INSTANCE_ID: AtomicI64 = AtomicI64::new(0);

/// Default capacity for each compiled WASM module cache.
///
/// This limits how many compiled contract/delegate modules are kept in memory.
/// When a cache is full, the least recently used module is evicted.
///
/// **Current value: 1024 modules per cache**
///
/// # Why 1024?
///
/// Wasmer's internal `code_memory: Vec<CodeMemory>` only grows — compiled machine
/// code persists even after a `Module` is dropped. Memory is only freed when the
/// entire `Engine` is dropped. Every eviction-recompilation cycle permanently grows
/// `code_memory`, causing unbounded memory growth proportional to total compilations
/// over the Engine's lifetime (see #2941).
///
/// A capacity of 1024 avoids evictions on production gateways (~92 contracts as of
/// Feb 2026), preventing the eviction-recompilation cycles that drive `code_memory`
/// growth.
///
/// # Memory Impact
///
/// Each compiled `Module` is typically 100KB-1MB. With shared caches (one instance
/// per cache type across all pool executors), actual memory usage is bounded by the
/// number of unique contracts/delegates on the network, not the capacity.
pub const DEFAULT_MODULE_CACHE_CAPACITY: usize = 1024;

/// A live WASM instance with RAII cleanup.
///
/// On drop, removes the MEM_ADDR entry. The WASM `Instance` is cleaned
/// up by calling [`Runtime::drop_running_instance`] after the instance is
/// no longer needed.
/// A live WASM instance with RAII cleanup.
///
/// On drop, removes the MEM_ADDR entry. The WASM `Instance` is cleaned
/// up by calling [`Runtime::drop_running_instance`] after the instance is
/// no longer needed.
pub(super) struct RunningInstance {
    /// Unique id allocated from `INSTANCE_ID`; keys the `MEM_ADDR` entry.
    pub id: i64,
    /// Engine-side handle used for all subsequent calls into this instance.
    pub handle: InstanceHandle,
    /// Whether the contract imports `freenet_contract_io` (streaming buffer support).
    /// Contracts compiled against stdlib >= 0.3.4 have this; older ones don't.
    pub supports_streaming: bool,
    /// Set to true when the engine instance has been explicitly cleaned up.
    dropped_from_engine: bool,
}

impl RunningInstance {
    /// Instantiate `module` in `engine` and register the new instance's linear
    /// memory in `native_api::MEM_ADDR` so host functions can do pointer
    /// arithmetic on guest addresses.
    ///
    /// `req_bytes` is forwarded to `engine.create_instance` (request sizing);
    /// `key` identifies the owning contract/delegate for host-side lookups.
    fn new(
        engine: &mut Engine,
        module: &<Engine as WasmEngine>::Module,
        key: Key,
        req_bytes: usize,
    ) -> RuntimeResult<Self> {
        // SeqCst is stronger than a plain counter needs, but keeps ordering
        // concerns out of the picture entirely.
        let id = INSTANCE_ID.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        let handle = engine.create_instance(module, id, req_bytes)?;

        // Record memory address and size for host function pointer arithmetic
        let (ptr, size) = engine.memory_info(&handle)?;
        native_api::MEM_ADDR.insert(id, InstanceInfo::new(ptr as i64, size, key));

        // Detect if the contract supports streaming buffers by checking
        // whether it imports the freenet_contract_io namespace. Contracts
        // compiled against stdlib >= 0.3.4 have this import.
        let supports_streaming = engine.module_has_streaming_io(module);

        Ok(Self {
            id,
            handle,
            supports_streaming,
            dropped_from_engine: false,
        })
    }
}

impl Drop for RunningInstance {
    fn drop(&mut self) {
        // Warn when the caller skipped `Runtime::drop_running_instance`: the
        // engine-side Instance then lives on until the whole engine is dropped.
        let engine_cleaned = self.dropped_from_engine;
        if !engine_cleaned {
            tracing::debug!(
                instance_id = self.id,
                "RunningInstance dropped without engine cleanup — MEM_ADDR cleaned up, \
                 but WASM Instance will leak until engine is dropped"
            );
        }
        // Unconditionally clear the MEM_ADDR entry as a safety net; removal is
        // idempotent, the engine may already have taken it out.
        let _ = native_api::MEM_ADDR.remove(&self.id);
    }
}

/// Host-side record of a live instance's linear memory, stored in
/// `native_api::MEM_ADDR` keyed by instance id.
pub(crate) struct InstanceInfo {
    /// Base address of the instance's linear memory in host address space.
    pub start_ptr: i64,
    // Size in bytes at registration time.
    // NOTE(review): not updated if the guest later grows its memory — confirm
    // callers re-fetch via `Runtime::linear_mem` after growth.
    pub mem_size: usize,
    // Owning contract/delegate; exposed in encoded form via `key()`.
    key: Key,
}

impl InstanceInfo {
    pub(crate) fn new(start_ptr: i64, mem_size: usize, key: Key) -> Self {
        Self {
            start_ptr,
            mem_size,
            key,
        }
    }

    pub fn key(&self) -> String {
        match &self.key {
            Key::Contract(k) => k.encode(),
            Key::Delegate(k) => k.encode(),
        }
    }
}

/// Which kind of component owns a running instance.
pub(super) enum Key {
    /// A contract, identified by its instance id.
    Contract(ContractInstanceId),
    /// A delegate, identified by its delegate key.
    Delegate(DelegateKey),
}

/// Errors raised while executing contract WASM code.
#[derive(thiserror::Error, Debug)]
pub enum ContractExecError {
    /// Error propagated from the contract interface itself.
    #[error(transparent)]
    ContractError(#[from] ContractError),

    /// A put was attempted for a contract that already has state.
    #[error("Attempted to perform a put for an already put contract ({0}), use update instead")]
    DoublePut(ContractKey),

    /// A payload length did not fit in the `i32`/`u32` range WASM buffers use.
    #[error("could not cast array length of {0} to max size (i32::MAX)")]
    InvalidArrayLength(usize),

    /// The contract returned something outside its declared interface.
    #[error("unexpected result from contract interface")]
    UnexpectedResult,

    #[error(
        "The operation ran out of gas. This might be caused by an infinite loop or an inefficient computation."
    )]
    OutOfGas,

    /// Wall-clock execution cap exceeded (see `RuntimeConfig::max_execution_seconds`).
    #[error("The operation exceeded the maximum allowed compute time")]
    MaxComputeTimeExceeded,
}

/// Configuration used when constructing a `Runtime` / its WASM engine.
pub struct RuntimeConfig {
    /// Maximum allowed execution time for WASM code in seconds
    pub max_execution_seconds: f64,
    /// Optional override for CPU cycles per second
    pub cpu_cycles_per_second: Option<u64>,
    /// Safety margin for CPU speed variations (0.0 to 1.0)
    pub safety_margin: f64,
    // NOTE(review): presumably gates gas-metering instrumentation (the
    // `ContractExecError::OutOfGas` path) — confirm against `Engine::new`.
    pub enable_metering: bool,
    /// Maximum number of compiled modules to keep in each cache.
    pub module_cache_capacity: usize,
}

impl Default for RuntimeConfig {
    fn default() -> Self {
        Self {
            max_execution_seconds: 5.0,
            cpu_cycles_per_second: None,
            safety_margin: 0.2,
            enable_metering: false,
            module_cache_capacity: DEFAULT_MODULE_CACHE_CAPACITY,
        }
    }
}

pub struct Runtime {
    /// The WASM engine backend (wasmtime).
    // NOTE(review): the `SharedModuleCache` docs above mention Wasmer instead —
    // confirm which backend is correct and align the comments.
    pub(super) engine: Engine,

    /// Storage for delegate secrets.
    pub(super) secret_store: SecretsStore,
    /// Local delegate (code) storage.
    pub(super) delegate_store: DelegateStore,
    /// LRU cache of compiled delegate modules (shared across pool executors).
    pub(super) delegate_modules: SharedModuleCache<DelegateKey>,

    /// Local contract storage.
    pub(crate) contract_store: ContractStore,
    /// LRU cache of compiled contract modules (shared across pool executors).
    pub(super) contract_modules: SharedModuleCache<ContractKey>,

    /// Optional state storage backend for V2 delegate contract access.
    pub(crate) state_store_db: Option<crate::contract::storages::Storage>,
}

impl Runtime {
    /// Check if the runtime is in a healthy state and can execute WASM.
    pub fn is_healthy(&self) -> bool {
        self.engine.is_healthy()
    }

    /// Get a clone of the backend engine for sharing with other runtimes.
    pub(crate) fn clone_backend_engine(&self) -> BackendEngine {
        self.engine.clone_backend_engine()
    }

    /// Install the state storage backend used for V2 delegate contract access.
    pub fn set_state_store_db(&mut self, db: crate::contract::storages::Storage) {
        // `replace` discards any previously-installed backend, same as plain
        // assignment of `Some(db)`.
        let _ = self.state_store_db.replace(db);
    }

    /// Construct a `Runtime` with an explicit [`RuntimeConfig`].
    ///
    /// Both module caches are created fresh (not shared) with
    /// `config.module_cache_capacity` slots; `host_mem` is forwarded to the
    /// engine constructor.
    pub fn build_with_config(
        contract_store: ContractStore,
        delegate_store: DelegateStore,
        secret_store: SecretsStore,
        host_mem: bool,
        config: RuntimeConfig,
    ) -> RuntimeResult<Self> {
        // `LruCache::new` needs a non-zero capacity; clamp a configured 0 to 1.
        let capacity =
            NonZeroUsize::new(config.module_cache_capacity).unwrap_or(NonZeroUsize::MIN);
        let contract_modules = Arc::new(Mutex::new(LruCache::new(capacity)));
        let delegate_modules = Arc::new(Mutex::new(LruCache::new(capacity)));

        let engine = Engine::new(&config, host_mem)?;

        Ok(Self {
            engine,
            secret_store,
            delegate_store,
            contract_modules,
            contract_store,
            delegate_modules,
            state_store_db: None,
        })
    }

    /// Construct a `Runtime` with the default [`RuntimeConfig`].
    pub fn build(
        contract_store: ContractStore,
        delegate_store: DelegateStore,
        secret_store: SecretsStore,
        host_mem: bool,
    ) -> RuntimeResult<Self> {
        let config = RuntimeConfig::default();
        Self::build_with_config(contract_store, delegate_store, secret_store, host_mem, config)
    }

    /// Build a runtime that shares compiled module caches AND the backend engine
    /// with other runtimes.
    ///
    /// `RuntimePool` uses this so pool executors don't each hold a private copy
    /// of every compiled WASM module: each executor keeps its own Store
    /// (runtime state: memories, globals, instances) while the compiler backend
    /// and the two module caches are shared.
    ///
    /// # Safety requirement
    ///
    /// Every runtime sharing a module cache MUST also share the same backend
    /// engine. Compiled modules reference internal data structures of the
    /// Engine that compiled them; instantiating a Module in a Store backed by
    /// a different Engine causes SIGSEGV.
    pub(crate) fn build_with_shared_module_caches(
        contract_store: ContractStore,
        delegate_store: DelegateStore,
        secret_store: SecretsStore,
        host_mem: bool,
        contract_modules: SharedModuleCache<ContractKey>,
        delegate_modules: SharedModuleCache<DelegateKey>,
        shared_backend: BackendEngine,
    ) -> RuntimeResult<Self> {
        let config = RuntimeConfig::default();
        let engine = Engine::new_with_shared_backend(&config, host_mem, shared_backend)?;
        Ok(Self {
            engine,
            contract_store,
            contract_modules,
            delegate_store,
            delegate_modules,
            secret_store,
            state_store_db: None,
        })
    }

    /// Explicitly clean up a running instance from the engine.
    ///
    /// This removes the WASM `Instance` from the engine's HashMap and
    /// the MEM_ADDR entry. Should be called after the instance is no longer
    /// needed (after all WASM calls are complete).
    pub(super) fn drop_running_instance(&mut self, running: &mut RunningInstance) {
        self.engine.drop_instance(&running.handle);
        running.dropped_from_engine = true;
    }

    /// Allocate a guest-side buffer sized to `data` and return a host-side view.
    ///
    /// Only the *length* of `data` is used here — callers write the payload
    /// themselves afterwards.
    pub(super) fn init_buf<T>(
        &mut self,
        handle: &InstanceHandle,
        data: T,
    ) -> RuntimeResult<BufferMut<'_>>
    where
        T: AsRef<[u8]>,
    {
        // Identical to sizing by capacity: delegate to the capacity-based path.
        let len = data.as_ref().len();
        self.init_buf_with_capacity(handle, len)
    }

    /// Allocate a guest-side `BufferBuilder` with room for `capacity` bytes and
    /// wrap it in a host-side `BufferMut` view over the instance's linear memory.
    // NOTE(review): `capacity as u32` truncates for capacities above u32::MAX —
    // confirm callers keep payloads within the 32-bit WASM buffer range.
    pub(super) fn init_buf_with_capacity(
        &mut self,
        handle: &InstanceHandle,
        capacity: usize,
    ) -> RuntimeResult<BufferMut<'_>> {
        // Allocation first: it may grow the guest memory, so the memory
        // snapshot below must come after it.
        let builder_ptr = self.engine.initiate_buffer(handle, capacity as u32)?;
        let linear_mem = self.linear_mem(handle)?;
        // SAFETY: `builder_ptr` is returned by the WASM allocator and points to a valid
        // `BufferBuilder` within the instance's linear memory described by `linear_mem`.
        unsafe {
            Ok(BufferMut::from_ptr(
                builder_ptr as *mut BufferBuilder,
                linear_mem,
            ))
        }
    }

    /// Write data into a streaming buffer with a `[total_len: u32]` header.
    ///
    /// Allocates a buffer of at most `max_cap` bytes, writes the header and
    /// as much data as fits. If the data exceeds the buffer capacity, the
    /// remainder is stored in `CONTRACT_IO` for on-demand refill keyed by
    /// `(instance_id, buffer_ptr)`.
    ///
    /// # Errors
    ///
    /// Returns `ContractExecError::InvalidArrayLength` when `data` exceeds
    /// `u32::MAX` bytes (the header cannot represent it).
    pub(super) fn write_streaming_buf(
        &mut self,
        handle: &InstanceHandle,
        instance_id: i64,
        data: &[u8],
        max_cap: usize,
    ) -> RuntimeResult<*mut BufferBuilder> {
        use super::native_api::{CONTRACT_IO, PendingContractData};

        // Header: 4 bytes for total payload length (LE u32)
        let header_size = 4usize;
        debug_assert!(max_cap >= header_size, "max_cap must be >= {header_size}");
        if data.len() > u32::MAX as usize {
            return Err(super::ContractExecError::InvalidArrayLength(data.len()).into());
        }
        let buf_cap = max_cap.min(data.len().saturating_add(header_size));
        let mut buf = self.init_buf_with_capacity(handle, buf_cap)?;

        let total_len = data.len() as u32;
        buf.write(total_len.to_le_bytes())?;

        // Write as much data as fits in the remaining capacity.
        // `saturating_sub` protects release builds — where the debug_assert
        // above is compiled out — from wrap-around when `max_cap < header_size`,
        // which would otherwise defeat the capacity clamp entirely.
        let first_chunk_size = data.len().min(buf_cap.saturating_sub(header_size));
        buf.write(&data[..first_chunk_size])?;

        let ptr = buf.ptr();

        // Store remainder for the fill callback if data didn't fit
        if first_chunk_size < data.len() {
            CONTRACT_IO.insert(
                (instance_id, ptr as i64),
                PendingContractData {
                    data: data[first_chunk_size..].to_vec(),
                    cursor: 0,
                },
            );
        }

        Ok(ptr)
    }

    /// Write data into a WASM buffer, choosing between the streaming protocol
    /// (for contracts compiled against stdlib >= 0.3.4) and the legacy one-shot
    /// protocol (for older contracts).
    pub(super) fn write_contract_buf(
        &mut self,
        running: &RunningInstance,
        data: &[u8],
        max_cap: usize,
    ) -> RuntimeResult<*mut BufferBuilder> {
        if !running.supports_streaming {
            // Legacy one-shot protocol: buffer sized to the full payload.
            let mut out = self.init_buf(&running.handle, data)?;
            out.write(data)?;
            return Ok(out.ptr());
        }
        self.write_streaming_buf(&running.handle, running.id, data, max_cap)
    }

    /// Write bincode-serialized data into a WASM buffer, choosing between
    /// streaming and legacy protocols.
    pub(super) fn write_contract_buf_serialized<T: serde::Serialize + ?Sized>(
        &mut self,
        running: &RunningInstance,
        value: &T,
        max_cap: usize,
    ) -> RuntimeResult<*mut BufferBuilder> {
        if !running.supports_streaming {
            // Legacy path: reserve the exact serialized size and encode
            // directly into guest memory without an intermediate Vec.
            let size = bincode::serialized_size(value)? as usize;
            let mut out = self.init_buf_with_capacity(&running.handle, size)?;
            bincode::serialize_into(&mut out, value)?;
            return Ok(out.ptr());
        }
        let bytes = bincode::serialize(value)?;
        self.write_streaming_buf(&running.handle, running.id, &bytes, max_cap)
    }

    /// Snapshot the instance's linear memory as a `WasmLinearMem` descriptor.
    // NOTE(review): pointer/size are captured at call time; if the guest grows
    // its memory afterwards the snapshot goes stale — callers here appear to
    // re-fetch per operation, confirm that invariant holds everywhere.
    pub(super) fn linear_mem(&mut self, handle: &InstanceHandle) -> RuntimeResult<WasmLinearMem> {
        let (ptr, size) = self.engine.memory_info(handle)?;
        // SAFETY: `ptr` and `size` come from the engine's live memory export for this
        // instance, so they describe a valid, allocated linear memory region.
        Ok(unsafe { WasmLinearMem::new(ptr, size as u64) })
    }

    /// Prepare a contract for execution: resolve a compiled module (shared LRU
    /// cache, compiling on miss) and create a fresh instance of it.
    ///
    /// The cache lock is held only for the lookup/insert; compilation runs
    /// outside the lock, so a concurrent executor may compile the same
    /// contract — the post-compile re-check keeps exactly one copy cached.
    ///
    /// # Errors
    ///
    /// `RuntimeInnerError::ContractNotFound` when the contract is missing from
    /// the local store; engine errors from compilation/instantiation otherwise.
    pub(super) fn prepare_contract_call(
        &mut self,
        key: &ContractKey,
        parameters: &Parameters,
        req_bytes: usize,
    ) -> RuntimeResult<RunningInstance> {
        // Check shared cache first (lock held briefly for Arc clone)
        let cached = self.contract_modules.lock().unwrap().get(key).cloned();
        let module = if let Some(module) = cached {
            tracing::debug!(contract = %key, "Module cache hit");
            module
        } else {
            tracing::info!(contract = %key, "Module cache miss — compiling");
            // Cache miss — compile outside the lock to avoid blocking other executors
            let contract = self
                .contract_store
                .fetch_contract(key, parameters)
                .ok_or_else(|| {
                    tracing::error!(
                        contract = %key,
                        key_code_hash = ?key.code_hash(),
                        phase = "prepare_contract_call_failed",
                        "Contract not found in store during WASM execution"
                    );
                    RuntimeInnerError::ContractNotFound(*key)
                })?;
            let code = match contract {
                ContractContainer::Wasm(ContractWasmAPIVersion::V1(contract_v1)) => {
                    contract_v1.code().data().to_vec()
                }
                // Only the V1 WASM API is supported so far. (The previous
                // `ContractContainer::Wasm(_) | _` or-pattern was redundant —
                // its trailing `_` already matched everything.)
                _ => unimplemented!(),
            };
            let module = self.engine.compile(&code)?;
            // Re-check cache: the LRU lock was released before compilation,
            // so another executor may have compiled and cached this contract.
            let mut cache = self.contract_modules.lock().unwrap();
            if let Some(existing) = cache.get(key).cloned() {
                existing
            } else {
                if let Some((evicted_key, _)) = cache.push(*key, module.clone()) {
                    tracing::warn!(
                        evicted_contract = %evicted_key,
                        cache_capacity = cache.cap().get(),
                        "Module cache eviction. \
                         Consider increasing DEFAULT_MODULE_CACHE_CAPACITY"
                    );
                }
                module
            }
        };
        RunningInstance::new(
            &mut self.engine,
            &module,
            Key::Contract(*key.id()),
            req_bytes,
        )
    }

    /// Prepare a delegate for execution and detect its API version.
    ///
    /// Returns the running instance and the detected API version (V1 or V2).
    /// V2 is detected by inspecting whether the WASM module imports the
    /// `freenet_delegate_contracts` namespace (async host functions).
    pub(super) fn prepare_delegate_call(
        &mut self,
        params: &Parameters,
        key: &DelegateKey,
        req_bytes: usize,
    ) -> RuntimeResult<(RunningInstance, DelegateApiVersion)> {
        // Fast path: shared LRU lookup (lock held only for the Arc clone).
        let cached = self.delegate_modules.lock().unwrap().get(key).cloned();
        let module = if let Some(module) = cached {
            tracing::debug!(delegate = %key, "Module cache hit");
            module
        } else {
            tracing::info!(delegate = %key, "Module cache miss — compiling");
            // Compile outside the lock so other executors aren't blocked.
            let delegate = self
                .delegate_store
                .fetch_delegate(key, params)
                .ok_or_else(|| RuntimeInnerError::DelegateNotFound(key.clone()))?;
            let code = delegate.code().as_ref().to_vec();
            let module = self.engine.compile(&code)?;
            // Re-check cache: the LRU lock was released before compilation,
            // so another executor may have compiled and cached this delegate.
            let mut cache = self.delegate_modules.lock().unwrap();
            if let Some(existing) = cache.get(key).cloned() {
                existing
            } else {
                if let Some((evicted_key, _)) = cache.push(key.clone(), module.clone()) {
                    tracing::warn!(
                        evicted_delegate = %evicted_key,
                        cache_capacity = cache.cap().get(),
                        "Delegate cache eviction. \
                         Consider increasing DEFAULT_MODULE_CACHE_CAPACITY"
                    );
                }
                module
            }
        };

        // V2 delegates import async host functions; probing the module's
        // imports selects the matching call convention.
        let api_version = if self.engine.module_has_async_imports(&module) {
            DelegateApiVersion::V2
        } else {
            DelegateApiVersion::V1
        };

        let running = RunningInstance::new(
            &mut self.engine,
            &module,
            Key::Delegate(key.clone()),
            req_bytes,
        )?;
        Ok((running, api_version))
    }
}

impl super::contract::ContractStoreBridge for Runtime {
    /// Resolve a contract's code hash from its instance id via the local store.
    fn code_hash_from_id(&self, id: &ContractInstanceId) -> Option<CodeHash> {
        self.contract_store.code_hash_from_id(id)
    }

    /// Fetch the full contract container for `key`/`params`, if stored locally.
    fn fetch_contract_code(
        &self,
        key: &ContractKey,
        params: &Parameters<'_>,
    ) -> Option<ContractContainer> {
        self.contract_store.fetch_contract(key, params)
    }

    /// Persist a contract in the local store.
    fn store_contract(&mut self, contract: ContractContainer) -> Result<(), anyhow::Error> {
        self.contract_store
            .store_contract(contract)
            .map_err(Into::into)
    }

    /// Remove a contract from the local store.
    fn remove_contract(&mut self, key: &ContractKey) -> Result<(), anyhow::Error> {
        self.contract_store.remove_contract(key).map_err(Into::into)
    }

    /// Make sure the store's key index contains an entry for `key`.
    fn ensure_key_indexed(&mut self, key: &ContractKey) -> Result<(), anyhow::Error> {
        self.contract_store
            .ensure_key_indexed(key)
            .map_err(Into::into)
    }
}

// Marker implementation: `Runtime` relies entirely on the trait's defaults.
impl super::contract::ContractRuntimeBridge for Runtime {}