1use std::mem::ManuallyDrop;
2
3use ahash::HashMap;
4use tycho_types::cell::CellTreeStats;
5use tycho_types::models::{
6 IntAddr, ShardIdent, SimpleLib, SizeLimitsConfig, StateInit, StdAddr, WorkchainDescription,
7 WorkchainFormat,
8};
9use tycho_types::num::{VarUint24, VarUint56};
10use tycho_types::prelude::*;
11
/// Branch-prediction hint: returns `b` unchanged, written so the optimizer
/// treats the `true` case as the cold path.
///
/// `checked_div` yields `None` exactly when the divisor is zero, i.e. when
/// `b` is true; this appears to be the usual const-fn trick for steering
/// codegen toward the `false` branch (stable Rust has no const
/// `core::intrinsics::unlikely`). NOTE(review): keep the exact shape of this
/// expression — it exists purely for its codegen effect, not its logic.
#[inline(always)]
pub(crate) const fn unlikely(b: bool) -> bool {
    #[allow(clippy::needless_bool, clippy::bool_to_int_with_if)]
    if (1i32).checked_div(if b { 0 } else { 1 }).is_none() {
        true
    } else {
        false
    }
}
22
/// Limits for accumulating cell-tree storage statistics.
///
/// An accumulator fails (`ExtStorageStat::add_cell` returns `false`) once its
/// running totals exceed either bound.
#[derive(Debug, Default)]
pub struct StorageStatLimits {
    /// Maximum total number of data bits across all unique visited cells.
    pub bit_count: u32,
    /// Maximum total number of unique visited cells.
    pub cell_count: u32,
}

impl StorageStatLimits {
    /// Limits that are effectively never exceeded (both bounds at `u32::MAX`).
    pub const UNLIMITED: Self = Self {
        bit_count: u32::MAX,
        cell_count: u32::MAX,
    };
}
35
/// Self-referential wrapper around [`ExtStorageStat`] that owns the cells it
/// has visited.
///
/// Safety invariant: `inner.visited` holds references whose lifetimes were
/// unsafely extended to `'static` (see `add_cell`); they actually point into
/// cells kept alive by `cells`. Therefore `inner` must always be cleared or
/// dropped *before* `cells` is shrunk or dropped — `ManuallyDrop` plus the
/// manual [`Drop`] impl enforce that ordering.
pub struct OwnedExtStorageStat {
    // Keeps every cell passed to `add_cell` alive while `inner` may still
    // hold references into it.
    cells: Vec<Cell>,
    inner: ManuallyDrop<ExtStorageStat<'static>>,
}
40
impl OwnedExtStorageStat {
    /// Creates an accumulator with no effective cell/bit limits.
    pub fn unlimited() -> Self {
        Self::with_limits(StorageStatLimits::UNLIMITED)
    }

    /// Creates an empty accumulator that fails once `limits` are exceeded.
    pub fn with_limits(limits: StorageStatLimits) -> Self {
        Self {
            cells: Vec::new(),
            inner: ManuallyDrop::new(ExtStorageStat::with_limits(limits)),
        }
    }

    /// Lifts the limits of an existing accumulator without resetting its
    /// visited set or running totals.
    pub fn set_unlimited(&mut self) {
        self.inner.limits = StorageStatLimits::UNLIMITED;
    }

    /// Current accumulated totals (bits and unique cells).
    pub fn stats(&self) -> CellTreeStats {
        self.inner.stats()
    }

    /// Adds `cell` (and its whole subtree) to the statistics.
    ///
    /// Returns `false` if the configured limits were exceeded or the merkle
    /// depth cap was violated. An already-visited root returns `true`
    /// immediately without re-traversal.
    pub fn add_cell(&mut self, cell: Cell) -> bool {
        if self.inner.visited.contains_key(cell.repr_hash()) {
            return true;
        }

        // SAFETY: the `'static` reference points into `cell`, which is pushed
        // into `self.cells` below and kept alive until `inner` is cleared or
        // dropped first (see `clear` and the `Drop` impl).
        let cell_ref = unsafe { std::mem::transmute::<&DynCell, &'static DynCell>(cell.as_ref()) };
        let res = self.inner.add_cell(cell_ref);
        // Push unconditionally: even on failure, `inner.visited` may already
        // reference hashes inside this cell's subtree.
        self.cells.push(cell);
        res
    }

    /// Resets the accumulator to its initial (empty) state, keeping limits.
    pub fn clear(&mut self) {
        // Order matters: drop the borrowed `'static` references in `visited`
        // before releasing the cells that back them.
        self.inner.visited.clear();
        self.inner.cells = 0;
        self.inner.bits = 0;

        self.cells.clear();
    }
}
83
impl Drop for OwnedExtStorageStat {
    fn drop(&mut self) {
        // SAFETY: `inner` is dropped exactly once, here. It must go first:
        // its `visited` map holds `'static`-transmuted references into cells
        // owned by `self.cells`, which is dropped afterwards during the
        // regular field drop phase.
        unsafe { ManuallyDrop::drop(&mut self.inner) };
    }
}
91
92impl std::fmt::Debug for OwnedExtStorageStat {
93 #[inline]
94 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
95 std::fmt::Debug::fmt(&self.inner, f)
96 }
97}
98
/// Accumulator of cell-tree storage statistics over borrowed cells.
///
/// Tracks unique cells by representation hash, so shared subtrees are counted
/// once regardless of how many times they are referenced.
#[derive(Default)]
pub struct ExtStorageStat<'a> {
    // repr hash -> max merkle depth of that cell's subtree (memoization).
    visited: ahash::HashMap<&'a HashBytes, u8>,
    limits: StorageStatLimits,
    // Running total of unique cells visited.
    pub cells: u32,
    // Running total of data bits across unique cells.
    pub bits: u32,
}
106
impl<'a> ExtStorageStat<'a> {
    /// Maximum allowed merkle nesting depth for any visited subtree.
    pub const MAX_ALLOWED_MERKLE_DEPTH: u8 = 2;

    /// Creates an empty accumulator with the given limits.
    pub fn with_limits(limits: StorageStatLimits) -> Self {
        Self {
            visited: ahash::HashMap::default(),
            limits,
            cells: 0,
            bits: 0,
        }
    }

    /// Computes stats for a slice: the slice itself counts as one cell with
    /// `cs.size_bits()` bits, plus the full subtrees of all its references.
    ///
    /// Returns `None` if the limits or the merkle depth cap are exceeded.
    pub fn compute_for_slice(
        cs: &CellSlice<'a>,
        limits: StorageStatLimits,
    ) -> Option<CellTreeStats> {
        let mut state = Self {
            visited: ahash::HashMap::default(),
            // NOTE: the initial cell/bits of the slice are not themselves
            // checked against `limits`; only descendants trigger the check.
            cells: 1,
            bits: cs.size_bits() as u32,
        };

        for cell in cs.references() {
            state.add_cell_impl(cell)?;
        }

        Some(CellTreeStats {
            bit_count: state.bits as u64,
            cell_count: state.cells as u64,
        })
    }

    /// Current accumulated totals.
    pub fn stats(&self) -> CellTreeStats {
        CellTreeStats {
            bit_count: self.bits as u64,
            cell_count: self.cells as u64,
        }
    }

    /// Adds a cell subtree; `false` means a limit or the merkle cap was hit.
    pub fn add_cell(&mut self, cell: &'a DynCell) -> bool {
        self.add_cell_impl(cell).is_some()
    }

    // Recursive worker: returns the subtree's max merkle depth on success.
    // NOTE(review): recursion depth equals tree depth — presumably bounded by
    // cell depth limits upstream; confirm for untrusted input.
    fn add_cell_impl(&mut self, cell: &'a DynCell) -> Option<u8> {
        // Memoized hit: skip re-traversal, return the cached merkle depth.
        if let Some(merkle_depth) = self.visited.get(cell.repr_hash()).copied() {
            return Some(merkle_depth);
        }

        self.cells = self.cells.checked_add(1)?;
        self.bits = self.bits.checked_add(cell.bit_len() as u32)?;

        if self.cells > self.limits.cell_count || self.bits > self.limits.bit_count {
            return None;
        }

        // Subtree depth is the max over children, +1 if this cell is a merkle
        // proof/update node.
        let mut max_merkle_depth = 0u8;
        for cell in cell.references() {
            max_merkle_depth = std::cmp::max(self.add_cell_impl(cell)?, max_merkle_depth);
        }
        max_merkle_depth = max_merkle_depth.saturating_add(cell.cell_type().is_merkle() as u8);

        // NOTE(review): the cell is cached even when its depth exceeds the
        // cap below, so a later re-add of the same hash short-circuits to
        // `Some(depth)` at the cache hit above; parents still fail via the
        // max() fold. Confirm this top-level re-add behavior is intended.
        self.visited.insert(cell.repr_hash(), max_merkle_depth);
        (max_merkle_depth <= Self::MAX_ALLOWED_MERKLE_DEPTH).then_some(max_merkle_depth)
    }
}
173
174impl std::fmt::Debug for ExtStorageStat<'_> {
175 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
176 f.debug_struct("ExtStorageStat")
177 .field("limits", &self.limits)
178 .field("bits", &self.bits)
179 .field("cells", &self.cells)
180 .finish()
181 }
182}
183
184pub fn new_varuint24_truncate(value: u64) -> VarUint24 {
185 VarUint24::new(std::cmp::min(value, VarUint24::MAX.into_inner() as u64) as _)
186}
187
188pub fn new_varuint56_truncate(value: u64) -> VarUint56 {
189 VarUint56::new(std::cmp::min(value, VarUint56::MAX.into_inner()))
190}
191
192pub fn check_rewrite_src_addr(my_addr: &StdAddr, addr: &mut Option<IntAddr>) -> bool {
194 match addr {
195 None => {
197 *addr = Some(my_addr.clone().into());
198 true
199 }
200 Some(IntAddr::Std(addr)) if addr == my_addr => true,
202 Some(_) => false,
204 }
205}
206
/// Validates a message destination address against the workchain config and,
/// when possible, rewrites a 256-bit var address into an equivalent std one.
///
/// Returns `false` for anycast addresses, unknown workchains, workchains not
/// accepting messages, or address formats incompatible with the workchain.
pub fn check_rewrite_dst_addr(
    workchains: &HashMap<i32, WorkchainDescription>,
    addr: &mut IntAddr,
) -> bool {
    // Workchain range representable by the `i8` field of a std address.
    const STD_WORKCHAINS: std::ops::Range<i32> = -128..128;
    const STD_ADDR_LEN: u16 = 256;

    // `can_rewrite` stays false for std addresses: they need no rewrite.
    let mut can_rewrite = false;
    let workchain = match addr {
        IntAddr::Std(addr) => {
            // Anycast addresses are rejected outright.
            if addr.anycast.is_some() {
                return false;
            }

            addr.workchain as i32
        }
        IntAddr::Var(addr) => {
            if addr.anycast.is_some() {
                return false;
            }

            // A var address is convertible to std only if it is exactly
            // 256 bits and its workchain fits into i8.
            can_rewrite = addr.address_len.into_inner() == STD_ADDR_LEN
                && STD_WORKCHAINS.contains(&addr.workchain);

            addr.workchain
        }
    };

    // The masterchain is implicitly trusted; other workchains are checked
    // against the config.
    if workchain != ShardIdent::MASTERCHAIN.workchain() {
        let Some(workchain) = workchains.get(&workchain) else {
            return false;
        };

        if !workchain.accept_msgs {
            return false;
        }

        // Address format must match the workchain format description. A var
        // address in a basic workchain is only allowed when it is rewritable
        // to std (checked above).
        match (&workchain.format, &*addr) {
            (WorkchainFormat::Basic(_), IntAddr::Std(_)) => {}
            (WorkchainFormat::Basic(_), IntAddr::Var(_)) if can_rewrite => {}
            (WorkchainFormat::Extended(f), IntAddr::Std(_)) if f.check_addr_len(STD_ADDR_LEN) => {}
            (WorkchainFormat::Extended(f), IntAddr::Var(a))
                if f.check_addr_len(a.address_len.into_inner()) => {}
            _ => return false,
        }
    }

    // Normalize: replace a rewritable var address with the equivalent std one.
    if can_rewrite && let IntAddr::Var(var) = addr {
        debug_assert!(STD_WORKCHAINS.contains(&var.workchain));
        debug_assert_eq!(var.address_len.into_inner(), STD_ADDR_LEN);

        // 256 bits == 32 bytes; `min` guards against a short backing buffer.
        let len = std::cmp::min(var.address.len(), 32);
        let mut address = [0; 32];
        address[..len].copy_from_slice(&var.address[..len]);

        *addr = IntAddr::Std(StdAddr::new(var.workchain as i8, HashBytes(address)));
    }

    true
}
284
/// Outcome of an account-state size-limit check.
pub enum StateLimitsResult {
    /// Code, data and libraries are identical to the previous state; no
    /// check was performed.
    Unchanged,
    /// The new state exceeds the configured size limits.
    Exceeds,
    /// The new state fits within the configured size limits.
    Fits,
}
290
291pub fn check_state_limits_diff(
293 old_state: &StateInit,
294 new_state: &StateInit,
295 limits: &SizeLimitsConfig,
296 is_masterchain: bool,
297 stats_cache: &mut Option<OwnedExtStorageStat>,
298) -> StateLimitsResult {
299 fn unpack_state(state: &StateInit) -> (Option<&'_ Cell>, Option<&'_ Cell>, &'_ StateLibs) {
301 (state.code.as_ref(), state.data.as_ref(), &state.libraries)
302 }
303
304 let (old_code, old_data, old_libs) = unpack_state(old_state);
305 let (new_code, new_data, new_libs) = unpack_state(new_state);
306
307 let libs_changed = old_libs != new_libs;
309 if old_code == new_code && old_data == new_data && !libs_changed {
310 return StateLimitsResult::Unchanged;
311 }
312
313 let check_public_libs = is_masterchain && libs_changed;
316
317 check_state_limits(
318 new_code,
319 new_data,
320 new_libs,
321 limits,
322 check_public_libs,
323 stats_cache,
324 )
325}
326
/// Measures the combined cell tree of `code` + `data` + `libs` against the
/// account state size limits, optionally also capping public library count.
///
/// On success the populated stat accumulator is stored into `stats_cache`
/// (so the caller can reuse the measured totals); on `Exceeds` the cache is
/// left untouched.
// NOTE(review): `stats_cache` is write-only here — a fresh accumulator is
// allocated on every call rather than reusing a cached one; confirm whether
// reuse was intended.
pub fn check_state_limits(
    code: Option<&Cell>,
    data: Option<&Cell>,
    libs: &StateLibs,
    limits: &SizeLimitsConfig,
    check_public_libs: bool,
    stats_cache: &mut Option<OwnedExtStorageStat>,
) -> StateLimitsResult {
    let mut stats = OwnedExtStorageStat::with_limits(StorageStatLimits {
        bit_count: limits.max_acc_state_bits,
        cell_count: limits.max_acc_state_cells,
    });

    // Shared subtrees between code/data/libs are deduplicated by the
    // accumulator's visited set, so they are only counted once.
    if let Some(code) = code
        && !stats.add_cell(code.clone())
    {
        return StateLimitsResult::Exceeds;
    }

    if let Some(data) = data
        && !stats.add_cell(data.clone())
    {
        return StateLimitsResult::Exceeds;
    }

    if let Some(libs) = libs.root()
        && !stats.add_cell(libs.clone())
    {
        return StateLimitsResult::Exceeds;
    }

    if check_public_libs {
        let mut public_libs_count = 0;
        for lib in libs.values() {
            // A malformed dictionary entry is treated as a limit violation.
            let Ok(lib) = lib else {
                return StateLimitsResult::Exceeds;
            };

            public_libs_count += lib.public as usize;
            if public_libs_count > limits.max_acc_public_libraries as usize {
                return StateLimitsResult::Exceeds;
            }
        }
    }

    *stats_cache = Some(stats);
    StateLimitsResult::Fits
}
379
/// Account library dictionary: library code hash -> [`SimpleLib`].
type StateLibs = Dict<HashBytes, SimpleLib>;
381
/// Divides a fixed-point (16 fractional bits) price value by 2^16,
/// rounding up.
pub const fn shift_ceil_price(value: u128) -> u128 {
    let quotient = value >> 16;
    let has_remainder = value & 0xffff != 0;
    if has_remainder { quotient + 1 } else { quotient }
}
386
#[cfg(test)]
mod tests {
    use super::*;

    /// Exercises the unsafe self-referential pattern of
    /// `OwnedExtStorageStat` (lifetime-extended refs into owned cells);
    /// intended to be run under Miri to validate soundness of drop/clear
    /// ordering.
    #[test]
    fn miri_check() {
        let mut owned = OwnedExtStorageStat::with_limits(StorageStatLimits {
            bit_count: 1000,
            cell_count: 1000,
        });

        fn fill(owned: &mut OwnedExtStorageStat) {
            // Empty cell: 1 unique cell, 0 data bits.
            let res = owned.add_cell(Cell::empty_cell());
            assert!(res);
            assert_eq!(owned.inner.bits, 0);
            assert_eq!(owned.inner.cells, 1);

            // Cell with 32 bits + a reference to the (already counted)
            // empty cell: only the new cell's bits/count are added.
            let res = owned.add_cell({
                let mut b = CellBuilder::new();
                b.store_u32(123).unwrap();
                b.store_reference(Cell::empty_cell()).unwrap();
                b.build().unwrap()
            });
            assert!(res);
            assert_eq!(owned.inner.bits, 32);
            assert_eq!(owned.inner.cells, 2);

            // Structurally identical cell has the same repr hash, so the
            // visited set deduplicates it and totals stay unchanged.
            let res = owned.add_cell({
                let mut b = CellBuilder::new();
                b.store_u32(123).unwrap();
                b.store_reference(Cell::empty_cell()).unwrap();
                b.build().unwrap()
            });
            assert!(res);
            assert_eq!(owned.inner.bits, 32);
            assert_eq!(owned.inner.cells, 2);
        }

        // Fill then drop: Drop must release `inner` before the cells.
        fill(&mut owned);
        drop(owned);

        let mut owned = OwnedExtStorageStat::with_limits(StorageStatLimits {
            bit_count: 1000,
            cell_count: 1000,
        });

        // Fill, clear, refill: clear must fully reset visited + counters.
        fill(&mut owned);
        owned.clear();
        fill(&mut owned);
    }
}
440}