use crate::scheduler::Scheduler;
#[cfg(not(target_family = "wasm"))]
use crate::ChunkCommand;
use crate::{
    error::{ScriptError, TransactionScriptError},
    type_id::TypeIdSystemScript,
    types::{
        DebugContext, DebugPrinter, FullSuspendedState, RunMode, ScriptGroup, ScriptGroupType,
        ScriptVersion, SgData, TransactionState, TxData, VerifyResult,
    },
    verify_env::TxVerifyEnv,
};
use ckb_chain_spec::consensus::{Consensus, TYPE_ID_CODE_HASH};
use ckb_error::Error;
#[cfg(feature = "logging")]
use ckb_logger::{debug, info};
use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider};
use ckb_types::{
    bytes::Bytes,
    core::{cell::ResolvedTransaction, Cycle, ScriptHashType},
    packed::{Byte32, Script},
    prelude::*,
};
#[cfg(not(target_family = "wasm"))]
use ckb_vm::machine::Pause as VMPause;
use ckb_vm::Error as VMInternalError;
use std::sync::Arc;
#[cfg(not(target_family = "wasm"))]
use tokio::sync::{
    oneshot,
    watch::{self, Receiver},
};

#[cfg(test)]
use core::sync::atomic::{AtomicBool, Ordering};

#[cfg(test)]
mod tests;

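/// Outcome of running a script group for one chunk: either the group was
/// suspended (with a VM snapshot, or `None` for the type ID system script),
/// or it completed with `(used_cycles, consumed_cycles)`.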
pub enum ChunkState {
    Suspended(Option<FullSuspendedState>),
    Completed(Cycle, Cycle),
}

impl ChunkState {
    pub fn suspended(state: FullSuspendedState) -> Self {
        ChunkState::Suspended(Some(state))
    }

    pub fn suspended_type_id() -> Self {
        ChunkState::Suspended(None)
    }
}

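/// Verifies the scripts of a resolved transaction by executing each script
/// group, either in one pass or in resumable chunks.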
pub struct TransactionScriptsVerifier<DL> {
    tx_data: Arc<TxData<DL>>,

    debug_printer: DebugPrinter,
    #[cfg(test)]
    skip_pause: Arc<AtomicBool>,
}

impl<DL> TransactionScriptsVerifier<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
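    /// Creates a verifier for the resolved transaction `rtx`, loading cell
    /// data through `data_loader` and evaluating scripts against the given
    /// consensus parameters and transaction environment.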
    pub fn new(
        rtx: Arc<ResolvedTransaction>,
        data_loader: DL,
        consensus: Arc<Consensus>,
        tx_env: Arc<TxVerifyEnv>,
    ) -> TransactionScriptsVerifier<DL> {
        let tx_data = Arc::new(TxData::new(rtx, data_loader, consensus, tx_env));

        let debug_printer: DebugPrinter = Arc::new(
            #[allow(unused_variables)]
            |hash: &Byte32, message: &str| {
                #[cfg(feature = "logging")]
                debug!("script group: {} DEBUG OUTPUT: {}", hash, message);
            },
        );

        #[cfg(test)]
        let skip_pause = Arc::new(AtomicBool::new(false));

        TransactionScriptsVerifier {
            tx_data,
            debug_printer,
            #[cfg(test)]
            skip_pause,
        }
    }

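    /// Replaces the default debug printer, which forwards script debug
    /// messages to the logger, with a custom callback.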
    pub fn set_debug_printer<F: Fn(&Byte32, &str) + Sync + Send + 'static>(&mut self, func: F) {
        self.debug_printer = Arc::new(func);
    }

    #[cfg(test)]
    pub(crate) fn set_skip_pause(&self, skip_pause: bool) {
        self.skip_pause.store(skip_pause, Ordering::SeqCst);
    }

    #[inline]
    #[allow(dead_code)]
    fn hash(&self) -> Byte32 {
        self.tx_data.tx_hash()
    }

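    /// Extracts the code bytes that `script` references.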
    pub fn extract_script(&self, script: &Script) -> Result<Bytes, ScriptError> {
        self.tx_data.extract_script(script)
    }

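    /// Determines the script version used to run `script`.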
    pub fn select_version(&self, script: &Script) -> Result<ScriptVersion, ScriptError> {
        self.tx_data.select_version(script)
    }

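    /// Iterates over all script groups of the transaction, keyed by script hash.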
    pub fn groups(&self) -> impl Iterator<Item = (&'_ Byte32, &'_ ScriptGroup)> {
        self.tx_data.groups()
    }

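    /// Like `groups`, but each item also carries its `ScriptGroupType`.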
    pub fn groups_with_type(
        &self,
    ) -> impl Iterator<Item = (ScriptGroupType, &'_ Byte32, &'_ ScriptGroup)> {
        self.tx_data.groups_with_type()
    }

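    /// Finds the script group matching the given group type and script hash, if any.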
    pub fn find_script_group(
        &self,
        script_group_type: ScriptGroupType,
        script_hash: &Byte32,
    ) -> Option<&ScriptGroup> {
        self.tx_data
            .find_script_group(script_group_type, script_hash)
    }

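    /// Verifies every script group in the transaction and returns the total
    /// cycles consumed, or the first script error encountered.
    ///
    /// A minimal sketch of the expected call pattern; the data loader and the
    /// surrounding setup are application-specific placeholders:
    ///
    /// ```ignore
    /// let verifier = TransactionScriptsVerifier::new(rtx, data_loader, consensus, tx_env);
    /// let cycles = verifier.verify(max_cycles)?;
    /// ```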
    pub fn verify(&self, max_cycles: Cycle) -> Result<Cycle, Error> {
        let mut cycles: Cycle = 0;

        for (_hash, group) in self.groups() {
            let used_cycles = self
                .verify_script_group(group, max_cycles - cycles)
                .map_err(|e| {
                    #[cfg(feature = "logging")]
                    logging::on_script_error(_hash, &self.hash(), &e);
                    e.source(group)
                })?;

            cycles = wrapping_cycles_add(cycles, used_cycles, group)?;
        }
        Ok(cycles)
    }

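    /// Performs resumable verification: runs the script groups until either
    /// all of them complete (`VerifyResult::Completed`) or the cycle limit is
    /// reached, in which case the current progress is returned as
    /// `VerifyResult::Suspended` so verification can be resumed later.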
    pub fn resumable_verify(&self, limit_cycles: Cycle) -> Result<VerifyResult, Error> {
        let mut cycles = 0;
        let mut current_consumed_cycles = 0;

        let groups: Vec<_> = self.groups().collect();
        for (idx, (_hash, group)) in groups.iter().enumerate() {
            let remain_cycles = limit_cycles
                .checked_sub(current_consumed_cycles)
                .ok_or_else(|| {
                    ScriptError::Other(format!("expect invalid cycles {limit_cycles} {cycles}"))
                        .source(group)
                })?;

            match self.verify_group_with_chunk(group, remain_cycles, &None) {
                Ok(ChunkState::Completed(used_cycles, consumed_cycles)) => {
                    current_consumed_cycles =
                        wrapping_cycles_add(current_consumed_cycles, consumed_cycles, group)?;
                    cycles = wrapping_cycles_add(cycles, used_cycles, group)?;
                }
                Ok(ChunkState::Suspended(state)) => {
                    let current = idx;
                    let state = TransactionState::new(state, current, cycles, remain_cycles);
                    return Ok(VerifyResult::Suspended(state));
                }
                Err(e) => {
                    #[cfg(feature = "logging")]
                    logging::on_script_error(_hash, &self.hash(), &e);
                    return Err(e.source(group).into());
                }
            }
        }

        Ok(VerifyResult::Completed(cycles))
    }

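    /// Performs verification that can be paused, resumed, or stopped through
    /// `command_rx`, returning the total cycles once every group has completed.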
    #[cfg(not(target_family = "wasm"))]
    pub async fn resumable_verify_with_signal(
        &self,
        limit_cycles: Cycle,
        command_rx: &mut Receiver<ChunkCommand>,
    ) -> Result<Cycle, Error> {
        let mut cycles = 0;

        let groups: Vec<_> = self.groups().collect();
        for (_hash, group) in groups.iter() {
            let remain_cycles = limit_cycles.checked_sub(cycles).ok_or_else(|| {
                ScriptError::Other(format!("expect invalid cycles {limit_cycles} {cycles}"))
                    .source(group)
            })?;

            match self
                .verify_group_with_signal(group, remain_cycles, command_rx)
                .await
            {
                Ok(used_cycles) => {
                    cycles = wrapping_cycles_add(cycles, used_cycles, group)?;
                }
                Err(e) => {
                    #[cfg(feature = "logging")]
                    logging::on_script_error(_hash, &self.hash(), &e);
                    return Err(e.source(group).into());
                }
            }
        }

        Ok(cycles)
    }

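    /// Resumes a previously suspended verification from `state`, continuing
    /// with the remaining script groups under the new cycle limit.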
    pub fn resume_from_state(
        &self,
        state: &TransactionState,
        limit_cycles: Cycle,
    ) -> Result<VerifyResult, Error> {
        let TransactionState {
            current,
            state,
            current_cycles,
            ..
        } = state;

        let mut current_used = 0;
        let mut cycles = *current_cycles;

        let (_hash, current_group) = self.groups().nth(*current).ok_or_else(|| {
            ScriptError::Other(format!("snapshot group missing {current:?}")).unknown_source()
        })?;

        let resumed_script_result =
            self.verify_group_with_chunk(current_group, limit_cycles, state);

        match resumed_script_result {
            Ok(ChunkState::Completed(used_cycles, consumed_cycles)) => {
                current_used = wrapping_cycles_add(current_used, consumed_cycles, current_group)?;
                cycles = wrapping_cycles_add(cycles, used_cycles, current_group)?;
            }
            Ok(ChunkState::Suspended(state)) => {
                let state = TransactionState::new(state, *current, cycles, limit_cycles);
                return Ok(VerifyResult::Suspended(state));
            }
            Err(e) => {
                #[cfg(feature = "logging")]
                logging::on_script_error(_hash, &self.hash(), &e);
                return Err(e.source(current_group).into());
            }
        }

        for (idx, (_hash, group)) in self.groups().enumerate().skip(current + 1) {
            let remain_cycles = limit_cycles.checked_sub(current_used).ok_or_else(|| {
                ScriptError::Other(format!(
                    "expect invalid cycles {limit_cycles} {current_used} {cycles}"
                ))
                .source(group)
            })?;

            match self.verify_group_with_chunk(group, remain_cycles, &None) {
                Ok(ChunkState::Completed(_, consumed_cycles)) => {
                    current_used = wrapping_cycles_add(current_used, consumed_cycles, group)?;
                    cycles = wrapping_cycles_add(cycles, consumed_cycles, group)?;
                }
                Ok(ChunkState::Suspended(state)) => {
                    let current = idx;
                    let state = TransactionState::new(state, current, cycles, remain_cycles);
                    return Ok(VerifyResult::Suspended(state));
                }
                Err(e) => {
                    #[cfg(feature = "logging")]
                    logging::on_script_error(_hash, &self.hash(), &e);
                    return Err(e.source(group).into());
                }
            }
        }

        Ok(VerifyResult::Completed(cycles))
    }

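    /// Finishes a suspended verification in one go: resumes the current group
    /// from the snapshot and runs all remaining groups, failing with
    /// `ExceededMaximumCycles` if `max_cycles` is not enough to complete.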
    pub fn complete(&self, snap: &TransactionState, max_cycles: Cycle) -> Result<Cycle, Error> {
        let mut cycles = snap.current_cycles;

        let (_hash, current_group) = self.groups().nth(snap.current).ok_or_else(|| {
            ScriptError::Other(format!("snapshot group missing {:?}", snap.current))
                .unknown_source()
        })?;

        if max_cycles < cycles {
            return Err(ScriptError::ExceededMaximumCycles(max_cycles)
                .source(current_group)
                .into());
        }

        match self.verify_group_with_chunk(current_group, max_cycles - cycles, &snap.state) {
            Ok(ChunkState::Completed(used_cycles, _consumed_cycles)) => {
                cycles = wrapping_cycles_add(cycles, used_cycles, current_group)?;
            }
            Ok(ChunkState::Suspended(_)) => {
                return Err(ScriptError::ExceededMaximumCycles(max_cycles)
                    .source(current_group)
                    .into());
            }
            Err(e) => {
                #[cfg(feature = "logging")]
                logging::on_script_error(_hash, &self.hash(), &e);
                return Err(e.source(current_group).into());
            }
        }

        for (_hash, group) in self.groups().skip(snap.current + 1) {
            let remain_cycles = max_cycles.checked_sub(cycles).ok_or_else(|| {
                ScriptError::Other(format!("expect invalid cycles {max_cycles} {cycles}"))
                    .source(group)
            })?;

            match self.verify_group_with_chunk(group, remain_cycles, &None) {
                Ok(ChunkState::Completed(used_cycles, _consumed_cycles)) => {
                    cycles = wrapping_cycles_add(cycles, used_cycles, group)?;
                }
                Ok(ChunkState::Suspended(_)) => {
                    return Err(ScriptError::ExceededMaximumCycles(max_cycles)
                        .source(group)
                        .into());
                }
                Err(e) => {
                    #[cfg(feature = "logging")]
                    logging::on_script_error(_hash, &self.hash(), &e);
                    return Err(e.source(group).into());
                }
            }
        }

        Ok(cycles)
    }

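    /// Verifies a single script group selected by type and hash, returning the
    /// cycles it consumed or `ScriptError::ScriptNotFound` if no such group exists.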
    pub fn verify_single(
        &self,
        script_group_type: ScriptGroupType,
        script_hash: &Byte32,
        max_cycles: Cycle,
    ) -> Result<Cycle, ScriptError> {
        match self.find_script_group(script_group_type, script_hash) {
            Some(group) => self.verify_script_group(group, max_cycles),
            None => Err(ScriptError::ScriptNotFound(script_hash.clone())),
        }
    }

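    /// Runs one script group, dispatching type ID scripts to the built-in
    /// `TypeIdSystemScript` verifier and everything else to the VM.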
    fn verify_script_group(
        &self,
        group: &ScriptGroup,
        max_cycles: Cycle,
    ) -> Result<Cycle, ScriptError> {
        if group.script.code_hash() == TYPE_ID_CODE_HASH.pack()
            && Into::<u8>::into(group.script.hash_type()) == Into::<u8>::into(ScriptHashType::Type)
        {
            let verifier = TypeIdSystemScript {
                rtx: &self.tx_data.rtx,
                script_group: group,
                max_cycles,
            };
            verifier.verify()
        } else {
            self.run(group, max_cycles)
        }
    }

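    /// Runs one script group for a single chunk, treating an exceeded cycle
    /// limit on the type ID system script as a suspension without a snapshot.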
    fn verify_group_with_chunk(
        &self,
        group: &ScriptGroup,
        max_cycles: Cycle,
        state: &Option<FullSuspendedState>,
    ) -> Result<ChunkState, ScriptError> {
        if group.script.code_hash() == TYPE_ID_CODE_HASH.pack()
            && Into::<u8>::into(group.script.hash_type()) == Into::<u8>::into(ScriptHashType::Type)
        {
            let verifier = TypeIdSystemScript {
                rtx: &self.tx_data.rtx,
                script_group: group,
                max_cycles,
            };
            match verifier.verify() {
                Ok(cycles) => Ok(ChunkState::Completed(cycles, cycles)),
                Err(ScriptError::ExceededMaximumCycles(_)) => Ok(ChunkState::suspended_type_id()),
                Err(e) => Err(e),
            }
        } else {
            self.chunk_run(group, max_cycles, state)
        }
    }

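    /// Creates or resumes a scheduler for the script group and runs it under
    /// the given cycle limit, suspending with a snapshot when the limit or a
    /// pause is hit.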
    fn chunk_run(
        &self,
        script_group: &ScriptGroup,
        max_cycles: Cycle,
        state: &Option<FullSuspendedState>,
    ) -> Result<ChunkState, ScriptError> {
        let mut scheduler = if let Some(state) = state {
            self.resume_scheduler(script_group, state)
        } else {
            self.create_scheduler(script_group)
        }?;
        let previous_cycles = scheduler.consumed_cycles();
        let res = scheduler.run(RunMode::LimitCycles(max_cycles));
        match res {
            Ok((exit_code, cycles)) => {
                if exit_code == 0 {
                    Ok(ChunkState::Completed(
                        cycles,
                        scheduler.consumed_cycles() - previous_cycles,
                    ))
                } else {
                    Err(ScriptError::validation_failure(
                        &script_group.script,
                        exit_code,
                    ))
                }
            }
            Err(error) => match error {
                VMInternalError::CyclesExceeded | VMInternalError::Pause => {
                    let snapshot = scheduler
                        .suspend()
                        .map_err(|err| self.map_vm_internal_error(err, max_cycles))?;
                    Ok(ChunkState::suspended(snapshot))
                }
                _ => Err(self.map_vm_internal_error(error, max_cycles)),
            },
        }
    }

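    /// Signal-driven counterpart of `verify_group_with_chunk`: type ID scripts
    /// are verified directly, other scripts run under external pause/stop control.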
    #[cfg(not(target_family = "wasm"))]
    async fn verify_group_with_signal(
        &self,
        group: &ScriptGroup,
        max_cycles: Cycle,
        command_rx: &mut Receiver<ChunkCommand>,
    ) -> Result<Cycle, ScriptError> {
        if group.script.code_hash() == TYPE_ID_CODE_HASH.pack()
            && Into::<u8>::into(group.script.hash_type()) == Into::<u8>::into(ScriptHashType::Type)
        {
            let verifier = TypeIdSystemScript {
                rtx: &self.tx_data.rtx,
                script_group: group,
                max_cycles,
            };
            verifier.verify()
        } else {
            self.chunk_run_with_signal(group, max_cycles, command_rx)
                .await
        }
    }

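    /// Builds a fresh scheduler for `script_group`, wiring in the debug printer.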
    pub fn create_scheduler(
        &self,
        script_group: &ScriptGroup,
    ) -> Result<Scheduler<DL>, ScriptError> {
        let sg_data = SgData::new(&self.tx_data, script_group)?;
        let debug_context = DebugContext {
            debug_printer: Arc::clone(&self.debug_printer),
            #[cfg(test)]
            skip_pause: Arc::clone(&self.skip_pause),
        };
        Ok(Scheduler::new(sg_data, debug_context))
    }

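    /// Rebuilds a scheduler for `script_group` from a previously saved suspended state.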
    pub fn resume_scheduler(
        &self,
        script_group: &ScriptGroup,
        state: &FullSuspendedState,
    ) -> Result<Scheduler<DL>, ScriptError> {
        let sg_data = SgData::new(&self.tx_data, script_group)?;
        let debug_context = DebugContext {
            debug_printer: Arc::clone(&self.debug_printer),
            #[cfg(test)]
            skip_pause: Arc::clone(&self.skip_pause),
        };
        Ok(Scheduler::resume(sg_data, debug_context, state.clone()))
    }

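    /// Runs a script group and returns the VM exit code together with the
    /// cycles consumed, leaving exit-code interpretation to the caller.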
    pub fn detailed_run(
        &self,
        script_group: &ScriptGroup,
        max_cycles: Cycle,
    ) -> Result<(i8, Cycle), ScriptError> {
        let mut scheduler = self.create_scheduler(script_group)?;
        scheduler
            .run(RunMode::LimitCycles(max_cycles))
            .map_err(|err| self.map_vm_internal_error(err, max_cycles))
    }

    fn run(&self, script_group: &ScriptGroup, max_cycles: Cycle) -> Result<Cycle, ScriptError> {
        let (code, cycles) = self.detailed_run(script_group, max_cycles)?;

        if code == 0 {
            Ok(cycles)
        } else {
            Err(ScriptError::validation_failure(&script_group.script, code))
        }
    }

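    /// Maps low-level VM errors to script errors: cycle exhaustion becomes
    /// `ExceededMaximumCycles`, and an external "stopped" error becomes `Interrupts`.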
    fn map_vm_internal_error(&self, error: VMInternalError, max_cycles: Cycle) -> ScriptError {
        match error {
            VMInternalError::CyclesExceeded => ScriptError::ExceededMaximumCycles(max_cycles),
            VMInternalError::External(reason) if reason.eq("stopped") => ScriptError::Interrupts,
            _ => ScriptError::VMInternalError(error),
        }
    }

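    /// Runs a script group on a spawned task while listening for
    /// `ChunkCommand`s: `Suspend` pauses the VM, `Resume` continues it, and
    /// `Stop` aborts the run with an interrupt error.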
    #[cfg(not(target_family = "wasm"))]
    async fn chunk_run_with_signal(
        &self,
        script_group: &ScriptGroup,
        max_cycles: Cycle,
        signal: &mut Receiver<ChunkCommand>,
    ) -> Result<Cycle, ScriptError> {
        let mut scheduler = self.create_scheduler(script_group)?;
        let mut pause = VMPause::new();
        let child_pause = pause.clone();
        let (finish_tx, mut finish_rx) = oneshot::channel::<Result<(i8, Cycle), ckb_vm::Error>>();

        // The scheduler runs on its own task; commands are relayed to it via a watch channel.
        let (child_tx, mut child_rx) = watch::channel(ChunkCommand::Resume);
        let jh = tokio::spawn(async move {
            child_rx.mark_changed();
            loop {
                let pause_cloned = child_pause.clone();
                let _ = child_rx.changed().await;
                match *child_rx.borrow() {
                    ChunkCommand::Stop => {
                        let exit = Err(ckb_vm::Error::External("stopped".into()));
                        let _ = finish_tx.send(exit);
                        return;
                    }
                    ChunkCommand::Suspend => {
                        continue;
                    }
                    ChunkCommand::Resume => {
                        let res = scheduler.run(RunMode::Pause(pause_cloned));
                        match res {
                            Ok(_) => {
                                let _ = finish_tx.send(res);
                                return;
                            }
                            Err(VMInternalError::Pause) => {
                                // Paused: wait for the next command before resuming.
                            }
                            _ => {
                                let _ = finish_tx.send(res);
                                return;
                            }
                        }
                    }
                }
            }
        });

        loop {
            tokio::select! {
                Ok(_) = signal.changed() => {
                    let command = signal.borrow().to_owned();
                    match command {
                        ChunkCommand::Suspend => {
                            pause.interrupt();
                        }
                        ChunkCommand::Stop => {
                            pause.interrupt();
                            let _ = child_tx.send(command);
                        }
                        ChunkCommand::Resume => {
                            pause.free();
                            let _ = child_tx.send(command);
                        }
                    }
                }
                Ok(res) = &mut finish_rx => {
                    let _ = jh.await;
                    match res {
                        Ok((0, cycles)) => {
                            return Ok(cycles);
                        }
                        Ok((exit_code, _cycles)) => {
                            return Err(ScriptError::validation_failure(
                                &script_group.script,
                                exit_code,
                            ))
                        }
                        Err(err) => {
                            return Err(self.map_vm_internal_error(err, max_cycles));
                        }
                    }
                }
                else => { break Err(ScriptError::validation_failure(&script_group.script, 0)) }
            }
        }
    }
}

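/// Adds two cycle counts, converting an overflow into `ScriptError::CyclesOverflow`
/// attributed to the given script group.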
fn wrapping_cycles_add(
    lhs: Cycle,
    rhs: Cycle,
    group: &ScriptGroup,
) -> Result<Cycle, TransactionScriptError> {
    lhs.checked_add(rhs)
        .ok_or_else(|| ScriptError::CyclesOverflow(lhs, rhs).source(group))
}

#[cfg(feature = "logging")]
mod logging {
    use super::{info, Byte32, ScriptError};

    pub fn on_script_error(group: &Byte32, tx: &Byte32, error: &ScriptError) {
        info!(
            "Error validating script group {} of transaction {}: {}",
            group, tx, error
        );
    }
}