1use core::{fmt, str};
7#[cfg(feature = "std")]
8use std::error;
9
10use bitcoin::absolute;
11#[cfg(feature = "compiler")]
12use {
13 crate::descriptor::TapTree,
14 crate::miniscript::ScriptContext,
15 crate::policy::compiler::{self, CompilerError, OrdF64},
16 crate::Descriptor,
17 crate::Miniscript,
18 crate::Tap,
19 core::cmp::Reverse,
20};
21
22use crate::expression::{self, FromTree};
23use crate::iter::{Tree, TreeLike};
24use crate::miniscript::types::extra_props::TimelockInfo;
25use crate::prelude::*;
26use crate::sync::Arc;
27#[cfg(all(doc, not(feature = "compiler")))]
28use crate::Descriptor;
29use crate::{
30 AbsLockTime, Error, ForEachKey, FromStrKey, MiniscriptKey, RelLockTime, Threshold, Translator,
31};
32
/// Maximum number of tapleaves a policy is permitted to compile to
/// (enforced by `check_num_tapleaves` and `enumerate_policy_tree`).
#[cfg(feature = "compiler")]
const MAX_COMPILATION_LEAVES: usize = 1024;
36
/// Concrete policy which corresponds directly to a miniscript structure,
/// and whose disjunctions are annotated with satisfaction probabilities
/// to assist the compiler.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Policy<Pk: MiniscriptKey> {
    /// Unsatisfiable.
    Unsatisfiable,
    /// Trivially satisfiable.
    Trivial,
    /// A public key which must sign to satisfy the descriptor.
    Key(Pk),
    /// An absolute locktime restriction.
    After(AbsLockTime),
    /// A relative locktime restriction.
    Older(RelLockTime),
    /// A SHA256 whose preimage must be provided to satisfy the descriptor.
    Sha256(Pk::Sha256),
    /// A SHA256d whose preimage must be provided to satisfy the descriptor.
    Hash256(Pk::Hash256),
    /// A RIPEMD160 whose preimage must be provided to satisfy the descriptor.
    Ripemd160(Pk::Ripemd160),
    /// A HASH160 whose preimage must be provided to satisfy the descriptor.
    Hash160(Pk::Hash160),
    /// A list of sub-policies, all of which must be satisfied.
    And(Vec<Arc<Policy<Pk>>>),
    /// A list of sub-policies, one of which must be satisfied, along with
    /// relative probabilities (odds) for each one, used by the compiler.
    Or(Vec<(usize, Arc<Policy<Pk>>)>),
    /// A set of descriptors, k of which must be satisfied (no compile-time
    /// bound on k, hence the `0` const parameter on `Threshold`).
    Thresh(Threshold<Arc<Policy<Pk>>, 0>),
}
71
/// Detailed error type for concrete policies.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum PolicyError {
    /// Cannot lift policies that have a combination of height and timelocks.
    HeightTimelockCombination,
    /// Duplicate public keys.
    DuplicatePubKeys,
}
80
/// Descriptor context for a policy compilation (see `compile_to_descriptor`).
pub enum DescriptorCtx<Pk> {
    /// `bare` descriptor (top-level scripts).
    Bare,
    /// `sh` descriptor (legacy P2SH).
    Sh,
    /// `wsh` descriptor (native segwit v0).
    Wsh,
    /// `sh(wsh(..))` descriptor (segwit v0 nested in P2SH).
    ShWsh,
    /// `tr` descriptor; the optional key is used as the internal key when the
    /// policy itself contains no key to extract (see `compile_tr`).
    Tr(Option<Pk>),
}
95
96impl fmt::Display for PolicyError {
97 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
98 match *self {
99 PolicyError::HeightTimelockCombination => {
100 f.write_str("Cannot lift policies that have a heightlock and timelock combination")
101 }
102 PolicyError::DuplicatePubKeys => f.write_str("Policy contains duplicate keys"),
103 }
104 }
105}
106
#[cfg(feature = "std")]
impl error::Error for PolicyError {
    // Implement `source` rather than the deprecated `cause`; the default
    // `cause` implementation delegates to `source`, so callers of either
    // method see the same result.
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        use self::PolicyError::*;

        // Exhaustive match (no catch-all) so adding a variant forces a
        // decision about its source.
        match self {
            HeightTimelockCombination | DuplicatePubKeys => None,
        }
    }
}
117
/// Iterator over the tapleaves of a policy, paired with the probability of
/// each leaf being taken; or-like nodes are flattened by the `Iterator` impl.
#[cfg(feature = "compiler")]
struct TapleafProbabilityIter<'p, Pk: MiniscriptKey> {
    // DFS work stack of (probability, subpolicy) pairs yet to be expanded.
    stack: Vec<(f64, &'p Policy<Pk>)>,
}
122
#[cfg(feature = "compiler")]
impl<'p, Pk: MiniscriptKey> Iterator for TapleafProbabilityIter<'p, Pk> {
    type Item = (f64, &'p Policy<Pk>);

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            // Stack empty => iteration is complete.
            let (top_prob, top) = self.stack.pop()?;

            match top {
                // An `Or` splits its probability among children in proportion
                // to their declared odds.
                Policy::Or(ref subs) => {
                    let total_sub_prob = subs.iter().map(|prob_sub| prob_sub.0).sum::<usize>();
                    // Push in reverse so that children pop in source order.
                    for (sub_prob, sub) in subs.iter().rev() {
                        let ratio = *sub_prob as f64 / total_sub_prob as f64;
                        self.stack.push((top_prob * ratio, sub));
                    }
                }
                // A 1-of-n threshold behaves like an `Or` with uniform odds.
                Policy::Thresh(ref thresh) if thresh.is_or() => {
                    let n64 = thresh.n() as f64;
                    for sub in thresh.iter().rev() {
                        self.stack.push((top_prob / n64, sub));
                    }
                }
                // Everything else is a tapleaf: yield it with its probability.
                _ => return Some((top_prob, top)),
            }
        }
    }
}
150
151impl<Pk: MiniscriptKey> Policy<Pk> {
152 #[cfg(feature = "compiler")]
153 fn check_binary_ops(&self) -> Result<(), CompilerError> {
154 for policy in self.pre_order_iter() {
155 match *policy {
156 Policy::And(ref subs) => {
157 if subs.len() != 2 {
158 return Err(CompilerError::NonBinaryArgAnd);
159 }
160 }
161 Policy::Or(ref subs) => {
162 if subs.len() != 2 {
163 return Err(CompilerError::NonBinaryArgOr);
164 }
165 }
166 _ => {}
167 }
168 }
169 Ok(())
170 }
171
    /// Returns an iterator over the tapleaves of this policy together with the
    /// probability of each leaf being used in a Taproot spend; the root starts
    /// at probability 1.0 and or-like nodes split it among their children.
    #[cfg(feature = "compiler")]
    fn tapleaf_probability_iter(&self) -> TapleafProbabilityIter<'_, Pk> {
        TapleafProbabilityIter { stack: vec![(1.0, self)] }
    }
198
    /// Extracts the internal key for a Taproot compilation.
    ///
    /// Picks the `Key` leaf with the highest compilation probability, falling
    /// back to `unspendable_key` if the policy contains no keys. Returns the
    /// chosen key together with the policy rewritten so that every occurrence
    /// of that key is replaced by `Unsatisfiable`
    /// (via `translate_unsatisfiable_pk`).
    ///
    /// # Errors
    ///
    /// `CompilerError::NoInternalKey` if the policy has no key and no
    /// `unspendable_key` was supplied.
    #[cfg(feature = "compiler")]
    fn extract_key(self, unspendable_key: Option<Pk>) -> Result<(Pk, Policy<Pk>), CompilerError> {
        let internal_key = self
            .tapleaf_probability_iter()
            .filter_map(|(prob, ref pol)| match pol {
                Policy::Key(pk) => Some((OrdF64(prob), pk)),
                _ => None,
            })
            // Most probable key wins the internal-key slot.
            .max_by_key(|(prob, _)| *prob)
            .map(|(_, pk)| pk.clone());

        match (internal_key, unspendable_key) {
            (Some(ref key), _) => Ok((key.clone(), self.translate_unsatisfiable_pk(key))),
            (_, Some(key)) => Ok((key, self)),
            _ => Err(CompilerError::NoInternalKey),
        }
    }
217
    /// Compiles the policy into a `tr` descriptor.
    ///
    /// An internal key is extracted from the policy (most probable key leaf);
    /// if none exists, `unspendable_key` is used instead. The remaining leaves
    /// are compiled individually and arranged into a Huffman tree weighted by
    /// their spend probabilities.
    ///
    /// # Errors
    ///
    /// Errors if the policy is invalid, non-binary `and`/`or` fragments exist,
    /// the policy is unsafe or malleable, no internal key is available, or the
    /// compilation would exceed `MAX_COMPILATION_LEAVES`.
    #[cfg(feature = "compiler")]
    pub fn compile_tr(&self, unspendable_key: Option<Pk>) -> Result<Descriptor<Pk>, CompilerError> {
        self.is_valid().map_err(CompilerError::PolicyError)?;
        self.check_binary_ops()?;
        match self.is_safe_nonmalleable() {
            (false, _) => Err(CompilerError::TopLevelNonSafe),
            (_, false) => Err(CompilerError::ImpossibleNonMalleableCompilation),
            _ => {
                let (internal_key, policy) = self.clone().extract_key(unspendable_key)?;
                policy.check_num_tapleaves()?;
                let tree = Descriptor::new_tr(
                    internal_key,
                    match policy {
                        // Key-spend only: no script tree.
                        Policy::Trivial => None,
                        policy => {
                            let mut leaf_compilations: Vec<(OrdF64, Miniscript<Pk, Tap>)> = vec![];
                            for (prob, pol) in policy.tapleaf_probability_iter() {
                                // Unsatisfiable leaves (e.g. the extracted
                                // internal key's occurrences) are dropped.
                                if *pol == Policy::Unsatisfiable {
                                    continue;
                                }
                                let compilation = compiler::best_compilation::<Pk, Tap>(pol)?;
                                compilation
                                    .sanity_check()
                                    .expect("compiler produces sane output");
                                leaf_compilations.push((OrdF64(prob), compilation));
                            }
                            if !leaf_compilations.is_empty() {
                                let tap_tree = with_huffman_tree::<Pk>(leaf_compilations);
                                Some(tap_tree)
                            } else {
                                // Every leaf was unsatisfiable: key-spend only.
                                None
                            }
                        }
                    },
                )
                .expect("compiler produces sane output");
                Ok(tree)
            }
        }
    }
277
278 #[cfg(feature = "compiler")]
297 pub fn compile_tr_private_experimental(
298 &self,
299 unspendable_key: Option<Pk>,
300 ) -> Result<Descriptor<Pk>, Error> {
301 self.is_valid().map_err(Error::ConcretePolicy)?;
302 self.check_binary_ops()?;
303 match self.is_safe_nonmalleable() {
304 (false, _) => Err(Error::from(CompilerError::TopLevelNonSafe)),
305 (_, false) => Err(Error::from(CompilerError::ImpossibleNonMalleableCompilation)),
306 _ => {
307 let (internal_key, policy) = self.clone().extract_key(unspendable_key)?;
308 let tree = Descriptor::new_tr(
309 internal_key,
310 match policy {
311 Policy::Trivial => None,
312 policy => {
313 let leaf_compilations: Vec<_> = policy
314 .enumerate_policy_tree(1.0)
315 .into_iter()
316 .filter(|x| x.1 != Arc::new(Policy::Unsatisfiable))
317 .map(|(prob, pol)| {
318 (
319 OrdF64(prob),
320 compiler::best_compilation(pol.as_ref()).unwrap(),
321 )
322 })
323 .collect();
324
325 if !leaf_compilations.is_empty() {
326 let tap_tree = with_huffman_tree::<Pk>(leaf_compilations);
327 Some(tap_tree)
328 } else {
329 None
331 }
332 }
333 },
334 )?;
335 Ok(tree)
336 }
337 }
338 }
339
340 #[cfg(feature = "compiler")]
351 pub fn compile_to_descriptor<Ctx: ScriptContext>(
352 &self,
353 desc_ctx: DescriptorCtx<Pk>,
354 ) -> Result<Descriptor<Pk>, Error> {
355 self.is_valid().map_err(Error::ConcretePolicy)?;
356 self.check_binary_ops()?;
357 match self.is_safe_nonmalleable() {
358 (false, _) => Err(Error::from(CompilerError::TopLevelNonSafe)),
359 (_, false) => Err(Error::from(CompilerError::ImpossibleNonMalleableCompilation)),
360 _ => match desc_ctx {
361 DescriptorCtx::Bare => Descriptor::new_bare(compiler::best_compilation(self)?),
362 DescriptorCtx::Sh => Descriptor::new_sh(compiler::best_compilation(self)?),
363 DescriptorCtx::Wsh => Descriptor::new_wsh(compiler::best_compilation(self)?),
364 DescriptorCtx::ShWsh => Descriptor::new_sh_wsh(compiler::best_compilation(self)?),
365 DescriptorCtx::Tr(unspendable_key) => self
366 .compile_tr(unspendable_key)
367 .map_err(Error::CompilerError),
368 },
369 }
370 }
371
372 #[cfg(feature = "compiler")]
380 pub fn compile<Ctx: ScriptContext>(&self) -> Result<Miniscript<Pk, Ctx>, CompilerError> {
381 self.is_valid()?;
382 self.check_binary_ops()?;
383 match self.is_safe_nonmalleable() {
384 (false, _) => Err(CompilerError::TopLevelNonSafe),
385 (_, false) => Err(CompilerError::ImpossibleNonMalleableCompilation),
386 _ => compiler::best_compilation(self),
387 }
388 }
389}
390
391#[cfg(feature = "compiler")]
392impl<Pk: MiniscriptKey> Policy<Pk> {
393 #[cfg(feature = "compiler")]
399 fn enumerate_pol(&self, prob: f64) -> Vec<(f64, Arc<Self>)> {
400 match self {
401 Policy::Or(subs) => {
402 let total_odds = subs.iter().fold(0, |acc, x| acc + x.0);
403 subs.iter()
404 .map(|(odds, pol)| (prob * *odds as f64 / total_odds as f64, pol.clone()))
405 .collect::<Vec<_>>()
406 }
407 Policy::Thresh(ref thresh) if thresh.is_or() => {
408 let total_odds = thresh.n();
409 thresh
410 .iter()
411 .map(|pol| (prob / total_odds as f64, pol.clone()))
412 .collect::<Vec<_>>()
413 }
414 Policy::Thresh(ref thresh) if !thresh.is_and() => generate_combination(thresh, prob),
415 pol => vec![(prob, Arc::new(pol.clone()))],
416 }
417 }
418
    /// Repeatedly expands or-like nodes of the policy (via `enumerate_pol`),
    /// always expanding the highest-probability candidate first, until no more
    /// expansion is possible or the number of leaves would exceed
    /// `MAX_COMPILATION_LEAVES`. Identical subpolicies reached through
    /// different branches have their probabilities merged. Returns the
    /// resulting (probability, subpolicy) leaf set.
    #[cfg(feature = "compiler")]
    fn enumerate_policy_tree(self, prob: f64) -> Vec<(f64, Arc<Self>)> {
        // Candidates ordered by descending probability (`Reverse` flips the
        // BTreeSet's ascending order), plus a map for probability merging.
        let mut tapleaf_prob_vec = BTreeSet::<(Reverse<OrdF64>, Arc<Self>)>::new();
        let mut pol_prob_map = BTreeMap::<Arc<Self>, OrdF64>::new();

        let arc_self = Arc::new(self);
        tapleaf_prob_vec.insert((Reverse(OrdF64(prob)), Arc::clone(&arc_self)));
        pol_prob_map.insert(Arc::clone(&arc_self), OrdF64(prob));

        // Leaf counts before/after each expansion round; equal counts mean a
        // candidate could not be expanded further.
        let mut prev_len = 0usize;
        let mut enum_len = tapleaf_prob_vec.len();

        let mut ret: Vec<(f64, Arc<Self>)> = vec![];

        'outer: loop {
            let mut prob: Reverse<OrdF64> = Reverse(OrdF64(0.0));
            let mut curr_policy: Arc<Self> = Arc::new(Policy::Unsatisfiable);
            let mut curr_pol_replace_vec: Vec<(f64, Arc<Self>)> = vec![];
            let mut no_more_enum = false;

            // Fully-expanded candidates encountered before the first
            // expandable one; they are finalised into `ret` below.
            let mut to_del: Vec<(f64, Arc<Self>)> = vec![];
            'inner: for (i, (p, pol)) in tapleaf_prob_vec.iter().enumerate() {
                curr_pol_replace_vec = pol.enumerate_pol(p.0 .0);
                // Expansion replaces one entry with len() entries.
                enum_len += curr_pol_replace_vec.len() - 1;
                assert!(prev_len <= enum_len);

                if prev_len < enum_len {
                    // Found an expandable candidate; record it and stop scanning.
                    prob = *p;
                    curr_policy = Arc::clone(pol);
                    break 'inner;
                } else if i == tapleaf_prob_vec.len() - 1 {
                    // Scanned everything without finding an expansion.
                    no_more_enum = true;
                } else {
                    to_del.push((p.0 .0, Arc::clone(pol)));
                }
            }

            if enum_len > MAX_COMPILATION_LEAVES || no_more_enum {
                // Budget exhausted or nothing left to expand: flush remaining
                // candidates and finish.
                for (p, pol) in tapleaf_prob_vec.into_iter() {
                    ret.push((p.0 .0, pol));
                }
                break 'outer;
            }

            // Remove the candidate we are about to replace by its children.
            assert!(tapleaf_prob_vec.remove(&(prob, curr_policy.clone())));

            // Finalise candidates that proved unexpandable this round.
            for (p, pol) in to_del {
                assert!(tapleaf_prob_vec.remove(&(Reverse(OrdF64(p)), pol.clone())));
                ret.push((p, pol.clone()));
            }

            // Insert the children, merging probability with any identical
            // subpolicy already present.
            for (p, policy) in curr_pol_replace_vec {
                match pol_prob_map.get(&policy) {
                    Some(prev_prob) => {
                        assert!(tapleaf_prob_vec.remove(&(Reverse(*prev_prob), policy.clone())));
                        tapleaf_prob_vec.insert((Reverse(OrdF64(prev_prob.0 + p)), policy.clone()));
                        pol_prob_map.insert(policy.clone(), OrdF64(prev_prob.0 + p));
                    }
                    None => {
                        tapleaf_prob_vec.insert((Reverse(OrdF64(p)), policy.clone()));
                        pol_prob_map.insert(policy.clone(), OrdF64(p));
                    }
                }
            }
            prev_len = enum_len;
        }

        ret
    }
521}
522
523impl<Pk: MiniscriptKey> ForEachKey<Pk> for Policy<Pk> {
524 fn for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, mut pred: F) -> bool {
525 self.pre_order_iter().all(|policy| match policy {
526 Policy::Key(ref pk) => pred(pk),
527 _ => true,
528 })
529 }
530}
531
532impl<Pk: MiniscriptKey> Policy<Pk> {
    /// Converts a policy using one kind of public key to another kind of
    /// public key, using the provided [`Translator`] for keys and hashes.
    pub fn translate_pk<T>(&self, t: &mut T) -> Result<Policy<T::TargetPk>, T::Error>
    where
        T: Translator<Pk>,
    {
        use Policy::*;

        // Bottom-up rebuild: with a right-to-left post-order walk, popping
        // from `translated` yields a node's children in left-to-right order.
        let mut translated = vec![];
        for data in self.rtl_post_order_iter() {
            let new_policy = match data.node {
                Unsatisfiable => Unsatisfiable,
                Trivial => Trivial,
                Key(ref pk) => t.pk(pk).map(Key)?,
                Sha256(ref h) => t.sha256(h).map(Sha256)?,
                Hash256(ref h) => t.hash256(h).map(Hash256)?,
                Ripemd160(ref h) => t.ripemd160(h).map(Ripemd160)?,
                Hash160(ref h) => t.hash160(h).map(Hash160)?,
                Older(ref n) => Older(*n),
                After(ref n) => After(*n),
                And(ref subs) => And((0..subs.len()).map(|_| translated.pop().unwrap()).collect()),
                // Odds are preserved; only the subpolicies are replaced.
                Or(ref subs) => Or(subs
                    .iter()
                    .map(|(prob, _)| (*prob, translated.pop().unwrap()))
                    .collect()),
                Thresh(ref thresh) => Thresh(thresh.map_ref(|_| translated.pop().unwrap())),
            };
            translated.push(Arc::new(new_policy));
        }
        // Exactly the root remains, and no other `Arc` reference exists.
        let root_node = translated.pop().unwrap();
        Ok(Arc::try_unwrap(root_node).unwrap())
    }
568
    /// Translates all occurrences of `Policy::Key(key)` to
    /// `Policy::Unsatisfiable`, leaving every other leaf untouched.
    pub fn translate_unsatisfiable_pk(self, key: &Pk) -> Policy<Pk> {
        use Policy::*;

        // Bottom-up rebuild, mirroring `translate_pk`: right-to-left
        // post-order makes stack pops yield children in left-to-right order.
        let mut translated = vec![];
        for data in Arc::new(self).rtl_post_order_iter() {
            let new_policy = match data.node.as_ref() {
                Policy::Key(ref k) if k.clone() == *key => Some(Policy::Unsatisfiable),
                And(ref subs) => {
                    Some(And((0..subs.len()).map(|_| translated.pop().unwrap()).collect()))
                }
                Or(ref subs) => Some(Or(subs
                    .iter()
                    .map(|(prob, _)| (*prob, translated.pop().unwrap()))
                    .collect())),
                Thresh(ref thresh) => Some(Thresh(thresh.map_ref(|_| translated.pop().unwrap()))),
                // Untouched leaves are shared, not rebuilt.
                _ => None,
            };
            match new_policy {
                Some(new_policy) => translated.push(Arc::new(new_policy)),
                None => translated.push(Arc::clone(data.node)),
            }
        }
        // Exactly the root remains, and no other `Arc` reference exists.
        let root_node = translated.pop().unwrap();
        Arc::try_unwrap(root_node).unwrap()
    }
597
598 pub fn keys(&self) -> Vec<&Pk> {
600 self.pre_order_iter()
601 .filter_map(|policy| match policy {
602 Policy::Key(ref pk) => Some(pk),
603 _ => None,
604 })
605 .collect()
606 }
607
    /// Returns the number of tapleaves a Taproot compilation of this policy
    /// would produce (counts the leaves yielded by `tapleaf_probability_iter`).
    #[cfg(feature = "compiler")]
    fn num_tap_leaves(&self) -> usize { self.tapleaf_probability_iter().count() }
612
613 #[cfg(feature = "compiler")]
615 fn check_num_tapleaves(&self) -> Result<(), CompilerError> {
616 let n = self.num_tap_leaves();
617 if n > MAX_COMPILATION_LEAVES {
618 return Err(CompilerError::TooManyTapleaves { n, max: MAX_COMPILATION_LEAVES });
619 }
620 Ok(())
621 }
622
623 pub fn check_duplicate_keys(&self) -> Result<(), PolicyError> {
625 let pks = self.keys();
626 let pks_len = pks.len();
627 let unique_pks_len = pks.into_iter().collect::<BTreeSet<_>>().len();
628
629 if pks_len > unique_pks_len {
630 Err(PolicyError::DuplicatePubKeys)
631 } else {
632 Ok(())
633 }
634 }
635
636 pub fn check_timelocks(&self) -> Result<(), PolicyError> {
644 let aggregated_timelock_info = self.timelock_info();
645 if aggregated_timelock_info.contains_combination {
646 Err(PolicyError::HeightTimelockCombination)
647 } else {
648 Ok(())
649 }
650 }
651
    /// Aggregates timelock information over the whole policy tree, tracking
    /// whether height- and time-based locks are combined in a way that makes
    /// some branch unsatisfiable (`contains_combination`).
    fn timelock_info(&self) -> TimelockInfo {
        use Policy::*;

        // Bottom-up fold: post-order guarantees children's infos are on the
        // stack when their parent is visited.
        let mut infos = vec![];
        for data in self.rtl_post_order_iter() {
            let info = match data.node {
                Policy::After(ref t) => TimelockInfo {
                    csv_with_height: false,
                    csv_with_time: false,
                    cltv_with_height: absolute::LockTime::from(*t).is_block_height(),
                    cltv_with_time: absolute::LockTime::from(*t).is_block_time(),
                    contains_combination: false,
                },
                Policy::Older(ref t) => TimelockInfo {
                    csv_with_height: t.is_height_locked(),
                    csv_with_time: t.is_time_locked(),
                    cltv_with_height: false,
                    cltv_with_time: false,
                    contains_combination: false,
                },
                // `And` requires all children: threshold of n-of-n.
                And(ref subs) => {
                    let iter = (0..subs.len()).map(|_| infos.pop().unwrap());
                    TimelockInfo::combine_threshold(subs.len(), iter)
                }
                // `Or` requires one child: threshold of 1-of-n.
                Or(ref subs) => {
                    let iter = (0..subs.len()).map(|_| infos.pop().unwrap());
                    TimelockInfo::combine_threshold(1, iter)
                }
                Thresh(ref thresh) => {
                    let iter = (0..thresh.n()).map(|_| infos.pop().unwrap());
                    TimelockInfo::combine_threshold(thresh.k(), iter)
                }
                // Leaves without timelocks contribute nothing.
                _ => TimelockInfo::default(),
            };
            infos.push(info);
        }
        // Only the root's aggregate remains.
        infos.pop().unwrap()
    }
697
698 pub fn is_valid(&self) -> Result<(), PolicyError> {
703 self.check_timelocks()?;
704 self.check_duplicate_keys()?;
705 Ok(())
706 }
707
    /// Checks whether the policy is safe (every spend path requires a
    /// signature) and non-malleable, returning `(is_safe, is_nonmalleable)`.
    pub fn is_safe_nonmalleable(&self) -> (bool, bool) {
        use Policy::*;

        // Bottom-up fold over (safe, non-malleable) pairs; post-order
        // guarantees children's results are on the stack for their parent.
        let mut acc = vec![];
        for data in self.rtl_post_order_iter() {
            let new = match data.node {
                Unsatisfiable | Trivial | Key(_) => (true, true),
                // Hash preimages and timelocks require no signature: unsafe
                // on their own, but non-malleable.
                Sha256(_) | Hash256(_) | Ripemd160(_) | Hash160(_) | After(_) | Older(_) => {
                    (false, true)
                }
                // `And`: safe if any child is safe; non-malleable only if all are.
                And(ref subs) => {
                    let (atleast_one_safe, all_non_mall) = (0..subs.len())
                        .map(|_| acc.pop().unwrap())
                        .fold((false, true), |acc, x: (bool, bool)| (acc.0 || x.0, acc.1 && x.1));
                    (atleast_one_safe, all_non_mall)
                }
                // `Or`: safe only if all children are safe; non-malleable if
                // all children are non-malleable and at least one is safe.
                Or(ref subs) => {
                    let (all_safe, atleast_one_safe, all_non_mall) = (0..subs.len())
                        .map(|_| acc.pop().unwrap())
                        .fold((true, false, true), |acc, x| {
                            (acc.0 && x.0, acc.1 || x.0, acc.2 && x.1)
                        });
                    (all_safe, atleast_one_safe && all_non_mall)
                }
                // `Thresh(k, n)`: safe if any satisfying subset must include a
                // safe child (more than n-k safe children); non-malleable if
                // all children are non-malleable and enough are safe.
                Thresh(ref thresh) => {
                    let (safe_count, non_mall_count) = (0..thresh.n())
                        .map(|_| acc.pop().unwrap())
                        .fold((0, 0), |(safe_count, non_mall_count), (safe, non_mall)| {
                            (safe_count + safe as usize, non_mall_count + non_mall as usize)
                        });
                    (
                        safe_count >= (thresh.n() - thresh.k() + 1),
                        non_mall_count == thresh.n() && safe_count >= (thresh.n() - thresh.k()),
                    )
                }
            };
            acc.push(new);
        }
        // Only the root's pair remains.
        acc.pop().unwrap()
    }
756}
757
// Mirrors the `Display` impl but formats keys and subpolicies with `{:?}`,
// and marks `Unsatisfiable`/`Trivial` with trailing parentheses.
impl<Pk: MiniscriptKey> fmt::Debug for Policy<Pk> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Policy::Unsatisfiable => f.write_str("UNSATISFIABLE()"),
            Policy::Trivial => f.write_str("TRIVIAL()"),
            Policy::Key(ref pk) => write!(f, "pk({:?})", pk),
            Policy::After(n) => write!(f, "after({})", n),
            Policy::Older(n) => write!(f, "older({})", n),
            Policy::Sha256(ref h) => write!(f, "sha256({})", h),
            Policy::Hash256(ref h) => write!(f, "hash256({})", h),
            Policy::Ripemd160(ref h) => write!(f, "ripemd160({})", h),
            Policy::Hash160(ref h) => write!(f, "hash160({})", h),
            Policy::And(ref subs) => {
                f.write_str("and(")?;
                if !subs.is_empty() {
                    write!(f, "{:?}", subs[0])?;
                    for sub in &subs[1..] {
                        write!(f, ",{:?}", sub)?;
                    }
                }
                f.write_str(")")
            }
            // `Or` branches are printed with their odds as `odds@policy`.
            Policy::Or(ref subs) => {
                f.write_str("or(")?;
                if !subs.is_empty() {
                    write!(f, "{}@{:?}", subs[0].0, subs[0].1)?;
                    for sub in &subs[1..] {
                        write!(f, ",{}@{:?}", sub.0, sub.1)?;
                    }
                }
                f.write_str(")")
            }
            Policy::Thresh(ref thresh) => fmt::Debug::fmt(&thresh.debug("thresh", true), f),
        }
    }
}
794
795impl<Pk: MiniscriptKey> fmt::Display for Policy<Pk> {
796 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
797 match *self {
798 Policy::Unsatisfiable => f.write_str("UNSATISFIABLE"),
799 Policy::Trivial => f.write_str("TRIVIAL"),
800 Policy::Key(ref pk) => write!(f, "pk({})", pk),
801 Policy::After(n) => write!(f, "after({})", n),
802 Policy::Older(n) => write!(f, "older({})", n),
803 Policy::Sha256(ref h) => write!(f, "sha256({})", h),
804 Policy::Hash256(ref h) => write!(f, "hash256({})", h),
805 Policy::Ripemd160(ref h) => write!(f, "ripemd160({})", h),
806 Policy::Hash160(ref h) => write!(f, "hash160({})", h),
807 Policy::And(ref subs) => {
808 f.write_str("and(")?;
809 if !subs.is_empty() {
810 write!(f, "{}", subs[0])?;
811 for sub in &subs[1..] {
812 write!(f, ",{}", sub)?;
813 }
814 }
815 f.write_str(")")
816 }
817 Policy::Or(ref subs) => {
818 f.write_str("or(")?;
819 if !subs.is_empty() {
820 write!(f, "{}@{}", subs[0].0, subs[0].1)?;
821 for sub in &subs[1..] {
822 write!(f, ",{}@{}", sub.0, sub.1)?;
823 }
824 }
825 f.write_str(")")
826 }
827 Policy::Thresh(ref thresh) => fmt::Display::fmt(&thresh.display("thresh", true), f),
828 }
829 }
830}
831
impl<Pk: FromStrKey> str::FromStr for Policy<Pk> {
    type Err = Error;
    fn from_str(s: &str) -> Result<Policy<Pk>, Error> {
        let tree = expression::Tree::from_str(s)?;
        let policy: Policy<Pk> = FromTree::from_tree(tree.root())?;
        // `from_tree` does not validate timelocks; reject height/time
        // combinations here so invalid policies never parse successfully.
        policy.check_timelocks().map_err(Error::ConcretePolicy)?;
        Ok(policy)
    }
}
841
842serde_string_impl_pk!(Policy, "a miniscript concrete policy");
843
impl<Pk: FromStrKey> expression::FromTree for Policy<Pk> {
    fn from_tree(root: expression::TreeIterItem) -> Result<Policy<Pk>, Error> {
        root.verify_no_curly_braces()
            .map_err(From::from)
            .map_err(Error::Parse)?;

        // Bottom-up build over a reversed pre-order walk: by the time a
        // fragment is visited, its children's policies are on the stack and
        // pop in left-to-right order. Each entry carries its `N@` probability
        // (1 if absent).
        let mut stack = Vec::<(usize, _)>::with_capacity(128);
        for node in root.pre_order_iter().rev() {
            let allow_prob;
            if let Some(parent) = node.parent() {
                // Only-children are data arguments (keys, hashes, locktimes)
                // consumed by the parent's verify_* call, not policies.
                if parent.n_children() == 1 {
                    continue;
                }
                let (_, parent_name) = parent
                    .name_separated('@')
                    .map_err(From::from)
                    .map_err(Error::Parse)?;
                // The first child of `thresh` is the threshold value k,
                // consumed by `verify_threshold` below.
                if node.is_first_child() && parent_name == "thresh" {
                    continue;
                }
                // Only direct children of `or` may carry an `N@` probability.
                allow_prob = parent_name == "or";
            } else {
                allow_prob = false;
            }

            let (frag_prob, frag_name) = if allow_prob {
                node.name_separated('@')
                    .map_err(From::from)
                    .map_err(Error::Parse)?
            } else {
                (None, node.name())
            };

            // Probabilities default to 1; zero is rejected by
            // `parse_num_nonzero`.
            let frag_prob = match frag_prob {
                None => 1,
                Some(s) => expression::parse_num_nonzero(s, "fragment probability")
                    .map_err(From::from)
                    .map_err(Error::Parse)? as usize,
            };

            let new =
                match frag_name {
                    "UNSATISFIABLE" => {
                        node.verify_n_children("UNSATISFIABLE", 0..=0)
                            .map_err(From::from)
                            .map_err(Error::Parse)?;
                        Ok(Policy::Unsatisfiable)
                    }
                    "TRIVIAL" => {
                        node.verify_n_children("TRIVIAL", 0..=0)
                            .map_err(From::from)
                            .map_err(Error::Parse)?;
                        Ok(Policy::Trivial)
                    }
                    "pk" => node
                        .verify_terminal_parent("pk", "public key")
                        .map(Policy::Key)
                        .map_err(Error::Parse),
                    "after" => node.verify_after().map_err(Error::Parse).map(Policy::After),
                    "older" => node.verify_older().map_err(Error::Parse).map(Policy::Older),
                    "sha256" => node
                        .verify_terminal_parent("sha256", "hash")
                        .map(Policy::Sha256)
                        .map_err(Error::Parse),
                    "hash256" => node
                        .verify_terminal_parent("hash256", "hash")
                        .map(Policy::Hash256)
                        .map_err(Error::Parse),
                    "ripemd160" => node
                        .verify_terminal_parent("ripemd160", "hash")
                        .map(Policy::Ripemd160)
                        .map_err(Error::Parse),
                    "hash160" => node
                        .verify_terminal_parent("hash160", "hash")
                        .map(Policy::Hash160)
                        .map_err(Error::Parse),
                    // `and`/`or` are strictly binary in concrete policy; the
                    // two pops yield the children in left-to-right order.
                    "and" => {
                        node.verify_n_children("and", 2..=2)
                            .map_err(From::from)
                            .map_err(Error::Parse)?;
                        Ok(Policy::And(vec![stack.pop().unwrap().1, stack.pop().unwrap().1]))
                    }
                    "or" => {
                        node.verify_n_children("or", 2..=2)
                            .map_err(From::from)
                            .map_err(Error::Parse)?;
                        Ok(Policy::Or(vec![stack.pop().unwrap(), stack.pop().unwrap()]))
                    }
                    "thresh" => node
                        .verify_threshold(|_| Ok(stack.pop().unwrap().1))
                        .map(Self::Thresh),
                    x => Err(Error::Parse(crate::ParseError::Tree(
                        crate::ParseTreeError::UnknownName { name: x.to_owned() },
                    ))),
                }?;

            stack.push((frag_prob, Arc::new(new)));
        }

        // Exactly the root policy remains on the stack.
        assert_eq!(stack.len(), 1);
        Ok(Arc::try_unwrap(stack.pop().unwrap().1).unwrap())
    }
}
952
/// Builds a `TapTree` from compiled leaves using Huffman's algorithm, so that
/// higher-probability leaves sit closer to the root (shorter control blocks).
///
/// # Panics
///
/// Panics if `ms` is empty or combining leaves exceeds the maximum tree depth.
#[cfg(feature = "compiler")]
fn with_huffman_tree<Pk: MiniscriptKey>(ms: Vec<(OrdF64, Miniscript<Pk, Tap>)>) -> TapTree<Pk> {
    // Min-heap on probability via `Reverse`: the two least likely subtrees
    // are combined first, exactly as in Huffman coding.
    let mut node_weights: BinaryHeap<(Reverse<OrdF64>, TapTree<Pk>)> = ms
        .into_iter()
        .map(|(prob, script)| (Reverse(prob), TapTree::leaf(script)))
        .collect();
    assert_ne!(node_weights.len(), 0, "empty Miniscript compilation");
    while node_weights.len() > 1 {
        let (w1, t1) = node_weights.pop().expect("len must at least be two");
        let (w2, t2) = node_weights.pop().expect("len must at least be two");

        let combined = TapTree::combine(t1, t2)
            .expect("huffman tree cannot produce depth > 128 given sane weights");
        // The merged subtree carries the sum of its children's weights.
        node_weights.push((Reverse(OrdF64((w1.0).0 + (w2.0).0)), combined));
    }

    debug_assert!(node_weights.len() == 1);
    node_weights
        .pop()
        .expect("huffman tree algorithm is broken")
        .1
}
979
/// Expands a `k`-of-`n` threshold (with `k < n`) into `n` thresholds of
/// `k`-of-`n-1`, each omitting one subpolicy, with `prob` split evenly
/// among them.
#[cfg(feature = "compiler")]
fn generate_combination<Pk: MiniscriptKey>(
    thresh: &Threshold<Arc<Policy<Pk>>, 0>,
    prob: f64,
) -> Vec<(f64, Arc<Policy<Pk>>)> {
    debug_assert!(thresh.k() < thresh.n());

    let share = prob / thresh.n() as f64;
    (0..thresh.n())
        .map(|skip| {
            // All subpolicies except the one at index `skip`.
            let remaining = thresh
                .iter()
                .enumerate()
                .filter(|(j, _)| *j != skip)
                .map(|(_, sub)| Arc::clone(sub));
            let sub_thresh = Threshold::from_iter(thresh.k(), remaining)
                .expect("k is strictly less than n, so (k, n-1) is a valid threshold");
            (share, Arc::new(Policy::Thresh(sub_thresh)))
        })
        .collect()
}
1009
/// Borrowed view of a policy node's children for the `TreeLike` impls below.
#[derive(Clone)]
pub enum TreeChildren<'a, Pk: MiniscriptKey> {
    /// Children without associated odds (`And` and, by reuse, `Thresh`).
    And(&'a [Arc<Policy<Pk>>]),
    /// Children paired with their `Or` odds.
    Or(&'a [(usize, Arc<Policy<Pk>>)]),
}
1022
impl<'a, Pk: MiniscriptKey> TreeLike for &'a Policy<Pk> {
    type NaryChildren = TreeChildren<'a, Pk>;

    fn nary_len(tc: &Self::NaryChildren) -> usize {
        match tc {
            TreeChildren::And(sl) => sl.len(),
            TreeChildren::Or(sl) => sl.len(),
        }
    }
    fn nary_index(tc: Self::NaryChildren, idx: usize) -> Self {
        match tc {
            TreeChildren::And(sl) => &sl[idx],
            // `Or` children are (odds, policy) pairs; odds are skipped here.
            TreeChildren::Or(sl) => &sl[idx].1,
        }
    }

    fn as_node(&self) -> Tree<Self, Self::NaryChildren> {
        use Policy::*;

        match *self {
            Unsatisfiable | Trivial | Key(_) | After(_) | Older(_) | Sha256(_) | Hash256(_)
            | Ripemd160(_) | Hash160(_) => Tree::Nullary,
            And(ref subs) => Tree::Nary(TreeChildren::And(subs)),
            Or(ref v) => Tree::Nary(TreeChildren::Or(v)),
            // Thresh subpolicies carry no odds, so the `And` view is reused.
            Thresh(ref thresh) => Tree::Nary(TreeChildren::And(thresh.data())),
        }
    }
}
1051
// Same traversal as `TreeLike for &Policy`, but yielding `&Arc<Policy>` so
// callers can cheaply clone shared subtrees.
impl<'a, Pk: MiniscriptKey> TreeLike for &'a Arc<Policy<Pk>> {
    type NaryChildren = TreeChildren<'a, Pk>;

    fn nary_len(tc: &Self::NaryChildren) -> usize {
        match tc {
            TreeChildren::And(sl) => sl.len(),
            TreeChildren::Or(sl) => sl.len(),
        }
    }
    fn nary_index(tc: Self::NaryChildren, idx: usize) -> Self {
        match tc {
            TreeChildren::And(sl) => &sl[idx],
            // `Or` children are (odds, policy) pairs; odds are skipped here.
            TreeChildren::Or(sl) => &sl[idx].1,
        }
    }

    fn as_node(&self) -> Tree<Self, Self::NaryChildren> {
        use Policy::*;

        match ***self {
            Unsatisfiable | Trivial | Key(_) | After(_) | Older(_) | Sha256(_) | Hash256(_)
            | Ripemd160(_) | Hash160(_) => Tree::Nullary,
            And(ref subs) => Tree::Nary(TreeChildren::And(subs)),
            Or(ref v) => Tree::Nary(TreeChildren::Or(v)),
            // Thresh subpolicies carry no odds, so the `And` view is reused.
            Thresh(ref thresh) => Tree::Nary(TreeChildren::And(thresh.data())),
        }
    }
}
1080
#[cfg(all(test, feature = "compiler"))]
mod compiler_tests {
    use core::str::FromStr;

    use super::*;
    use crate::policy::Concrete;

    // `generate_combination` on thresh(2, A, B, C, D) must yield the four
    // thresh(2, ...) policies each omitting one key, at probability 1/4.
    #[test]
    fn test_gen_comb() {
        let policies: Vec<Arc<Concrete<String>>> = vec!["pk(A)", "pk(B)", "pk(C)", "pk(D)"]
            .into_iter()
            .map(|st| policy_str!("{}", st))
            .map(Arc::new)
            .collect();
        let thresh = Threshold::new(2, policies).unwrap();

        let combinations = generate_combination(&thresh, 1.0);

        let comb_a: Vec<Policy<String>> = vec![
            policy_str!("pk(B)"),
            policy_str!("pk(C)"),
            policy_str!("pk(D)"),
        ];
        let comb_b: Vec<Policy<String>> = vec![
            policy_str!("pk(A)"),
            policy_str!("pk(C)"),
            policy_str!("pk(D)"),
        ];
        let comb_c: Vec<Policy<String>> = vec![
            policy_str!("pk(A)"),
            policy_str!("pk(B)"),
            policy_str!("pk(D)"),
        ];
        let comb_d: Vec<Policy<String>> = vec![
            policy_str!("pk(A)"),
            policy_str!("pk(B)"),
            policy_str!("pk(C)"),
        ];
        let expected_comb = vec![comb_a, comb_b, comb_c, comb_d]
            .into_iter()
            .map(|sub_pol| {
                let expected_thresh =
                    Threshold::from_iter(2, sub_pol.into_iter().map(Arc::new)).unwrap();
                (0.25, Arc::new(Policy::Thresh(expected_thresh)))
            })
            .collect::<Vec<_>>();
        assert_eq!(combinations, expected_comb);
    }

    // A single-key policy compiles to a key-spend-only `tr` descriptor: the
    // key becomes the internal key and no script tree is emitted.
    #[test]
    fn test_tr_pk_only() {
        let policy: Policy<String> = policy_str!("pk(A)");
        let desc = policy.compile_tr(None).unwrap();
        assert_eq!(desc.to_string(), "tr(A)#xyg3grex");
    }
}
1138
#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use super::*;

    // Liquid-federation-style policy: 2-of-3 with a timelocked 11-of-14
    // fallback; 17 keys in total, so the predicate must fire 17 times.
    #[test]
    fn for_each_key_count_keys() {
        let liquid_pol = Policy::<String>::from_str(
            "or(and(older(4096),thresh(2,pk(A),pk(B),pk(C))),thresh(11,pk(F1),pk(F2),pk(F3),pk(F4),pk(F5),pk(F6),pk(F7),pk(F8),pk(F9),pk(F10),pk(F11),pk(F12),pk(F13),pk(F14)))").unwrap();
        let mut count = 0;
        assert!(liquid_pol.for_each_key(|_| {
            count += 1;
            true
        }));
        assert_eq!(count, 17);
    }

    // `for_each_key` must return false as soon as the predicate fails.
    #[test]
    fn for_each_key_fails_predicate() {
        let policy =
            Policy::<String>::from_str("or(and(pk(key0),pk(key1)),pk(oddnamedkey))").unwrap();
        assert!(!policy.for_each_key(|k| k.starts_with("key")));
    }

    #[test]
    fn tranaslate_pk() {
        // Translator that prefixes keys with "NEW-" and leaves hashes as-is.
        pub struct TestTranslator;
        impl Translator<String> for TestTranslator {
            type TargetPk = String;
            type Error = core::convert::Infallible;

            fn pk(&mut self, pk: &String) -> Result<String, Self::Error> {
                let new = format!("NEW-{}", pk);
                Ok(new.to_string())
            }
            fn sha256(&mut self, hash: &String) -> Result<String, Self::Error> {
                Ok(hash.to_string())
            }
            fn hash256(&mut self, hash: &String) -> Result<String, Self::Error> {
                Ok(hash.to_string())
            }
            fn ripemd160(&mut self, hash: &String) -> Result<String, Self::Error> {
                Ok(hash.to_string())
            }
            fn hash160(&mut self, hash: &String) -> Result<String, Self::Error> {
                Ok(hash.to_string())
            }
        }
        let policy = Policy::<String>::from_str("or(and(pk(A),pk(B)),pk(C))").unwrap();
        let mut t = TestTranslator;

        let want = Policy::<String>::from_str("or(and(pk(NEW-A),pk(NEW-B)),pk(NEW-C))").unwrap();
        let got = policy
            .translate_pk(&mut t)
            .expect("failed to translate keys");

        assert_eq!(got, want);
    }

    // Only occurrences of the targeted key become UNSATISFIABLE.
    #[test]
    fn translate_unsatisfiable_pk() {
        let policy = Policy::<String>::from_str("or(and(pk(A),pk(B)),pk(C))").unwrap();

        let want = Policy::<String>::from_str("or(and(pk(A),UNSATISFIABLE),pk(C))").unwrap();
        let got = policy.translate_unsatisfiable_pk(&"B".to_string());

        assert_eq!(got, want);
    }

    // `keys` returns keys in pre-order.
    #[test]
    fn keys() {
        let policy = Policy::<String>::from_str("or(and(pk(A),pk(B)),pk(C))").unwrap();

        let want = vec!["A", "B", "C"];
        let got = policy.keys();

        assert_eq!(got, want);
    }

    #[test]
    #[cfg(feature = "compiler")]
    fn num_tap_leaves() {
        let policy = Policy::<String>::from_str("or(and(pk(A),pk(B)),pk(C))").unwrap();
        assert_eq!(policy.num_tap_leaves(), 2);
    }

    // A height lock (10) combined with a time lock (>= 500_000_000) in one
    // `and` must be rejected at parse time, so `unwrap` panics.
    #[test]
    #[should_panic]
    fn check_timelocks() {
        let _ = Policy::<String>::from_str("and(after(10),after(500000000))").unwrap();
    }

    // Zero-probability `or` branches are rejected by the parser.
    #[test]
    fn parse_zero_disjunction() {
        "or(0@pk(09),0@TRIVIAL)"
            .parse::<Policy<String>>()
            .unwrap_err();
    }
}
1239}