1use std::{
2 collections::HashMap,
3 fmt::{Debug, Display},
4 sync::Arc,
5};
6
7use accurate::{sum::Klein, traits::*};
8use auto_ops::*;
9use dyn_clone::DynClone;
10use fastrand::Rng;
11use ganesh::{samplers::ESSMove, Ensemble, Function, Minimizer, Sampler, Status};
12use laddu_core::{
13 amplitudes::{central_difference, AmplitudeValues, Evaluator, GradientValues, Model},
14 data::Dataset,
15 resources::Parameters,
16 Complex, DVector, Float, LadduError,
17};
18
19#[cfg(feature = "mpi")]
20use laddu_core::mpi::LadduMPI;
21
22#[cfg(feature = "mpi")]
23use mpi::{datatype::PartitionMut, topology::SimpleCommunicator, traits::*};
24
25#[cfg(feature = "python")]
26use crate::ganesh_ext::py_ganesh::{
27 py_parse_mcmc_options, py_parse_minimizer_options, PyEnsemble, PyStatus,
28};
29#[cfg(feature = "python")]
30use laddu_python::{
31 amplitudes::{PyEvaluator, PyModel},
32 data::PyDataset,
33};
34#[cfg(feature = "python")]
35use numpy::PyArray1;
36#[cfg(feature = "python")]
37use pyo3::{
38 exceptions::PyTypeError,
39 prelude::*,
40 types::{PyDict, PyList},
41};
42#[cfg(feature = "rayon")]
43use rayon::{prelude::*, ThreadPool, ThreadPoolBuilder};
44
45use crate::ganesh_ext::{MCMCOptions, MinimizerOptions};
46
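/// A single term in an extended likelihood.
///
/// Implementors provide the term's value and its free parameter names; an analytic gradient is
/// optional, and the default falls back to central finite differences.
///
/// A minimal sketch of a custom term (a Gaussian penalty on one named parameter; the struct and
/// field names here are illustrative, not part of the crate):
///
/// ```ignore
/// #[derive(Clone)]
/// struct GaussianPenalty {
///     parameter: String,
///     mean: Float,
///     sigma: Float,
/// }
///
/// impl LikelihoodTerm for GaussianPenalty {
///     fn parameters(&self) -> Vec<String> {
///         vec![self.parameter.clone()]
///     }
///     fn evaluate(&self, parameters: &[Float]) -> Float {
///         // Contributes ((x - mean) / sigma)^2, i.e. -2 ln N(x; mean, sigma) up to a constant.
///         let z = (parameters[0] - self.mean) / self.sigma;
///         z * z
///     }
///     // `evaluate_gradient` is left to the central-difference default.
/// }
/// ```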
47pub trait LikelihoodTerm: DynClone + Send + Sync {
50 fn evaluate(&self, parameters: &[Float]) -> Float;
52 fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
54 central_difference(parameters, |parameters: &[Float]| self.evaluate(parameters))
55 }
56 fn parameters(&self) -> Vec<String>;
58}
59
60dyn_clone::clone_trait_object!(LikelihoodTerm);
61
62#[cfg(feature = "python")]
69#[pyclass(name = "LikelihoodTerm", module = "laddu")]
70#[derive(Clone)]
71pub struct PyLikelihoodTerm(pub Box<dyn LikelihoodTerm>);
72
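/// An extended, unbinned negative log-likelihood built from a [`Model`], a data [`Dataset`], and
/// an accepted (reconstructed) Monte Carlo [`Dataset`] used for normalization.
///
/// A minimal usage sketch (construction of `model`, `ds_data`, `ds_accmc`, and the
/// `parameter_values` vector is assumed to happen elsewhere):
///
/// ```ignore
/// let nll = NLL::new(&model, &ds_data, &ds_accmc);
/// let value = LikelihoodTerm::evaluate(nll.as_ref(), &parameter_values);
/// ```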
73#[derive(Clone)]
75pub struct NLL {
76 pub data_evaluator: Evaluator,
78 pub accmc_evaluator: Evaluator,
80}
81
82impl NLL {
83 pub fn new(model: &Model, ds_data: &Arc<Dataset>, ds_accmc: &Arc<Dataset>) -> Box<Self> {
86 Self {
87 data_evaluator: model.load(ds_data),
88 accmc_evaluator: model.load(ds_accmc),
89 }
90 .into()
91 }
92 pub fn activate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
94 self.data_evaluator.activate(&name)?;
95 self.accmc_evaluator.activate(&name)
96 }
97 pub fn activate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
99 self.data_evaluator.activate_many(names)?;
100 self.accmc_evaluator.activate_many(names)
101 }
102 pub fn activate_all(&self) {
104 self.data_evaluator.activate_all();
105 self.accmc_evaluator.activate_all();
106 }
107 pub fn deactivate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
109 self.data_evaluator.deactivate(&name)?;
110 self.accmc_evaluator.deactivate(&name)
111 }
112 pub fn deactivate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
114 self.data_evaluator.deactivate_many(names)?;
115 self.accmc_evaluator.deactivate_many(names)
116 }
117 pub fn deactivate_all(&self) {
119 self.data_evaluator.deactivate_all();
120 self.accmc_evaluator.deactivate_all();
121 }
122 pub fn isolate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
124 self.data_evaluator.isolate(&name)?;
125 self.accmc_evaluator.isolate(&name)
126 }
127 pub fn isolate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
129 self.data_evaluator.isolate_many(names)?;
130 self.accmc_evaluator.isolate_many(names)
131 }
132
133 pub fn project_local(
142 &self,
143 parameters: &[Float],
144 mc_evaluator: Option<Evaluator>,
145 ) -> Vec<Float> {
146 let (mc_dataset, result) = if let Some(mc_evaluator) = mc_evaluator {
147 (
148 mc_evaluator.dataset.clone(),
149 mc_evaluator.evaluate_local(parameters),
150 )
151 } else {
152 (
153 self.accmc_evaluator.dataset.clone(),
154 self.accmc_evaluator.evaluate_local(parameters),
155 )
156 };
157 let n_mc = self.accmc_evaluator.dataset.n_events();
158 #[cfg(feature = "rayon")]
159 let output: Vec<Float> = result
160 .par_iter()
161 .zip(mc_dataset.events.par_iter())
162 .map(|(l, e)| e.weight * l.re / n_mc as Float)
163 .collect();
164
165 #[cfg(not(feature = "rayon"))]
166 let output: Vec<Float> = result
167 .iter()
168 .zip(mc_dataset.events.iter())
169 .map(|(l, e)| e.weight * l.re / n_mc as Float)
170 .collect();
171 output
172 }
173
174 #[cfg(feature = "mpi")]
183 pub fn project_mpi(
184 &self,
185 parameters: &[Float],
186 mc_evaluator: Option<Evaluator>,
187 world: &SimpleCommunicator,
188 ) -> Vec<Float> {
189 let n_events = mc_evaluator
190 .as_ref()
191 .unwrap_or(&self.accmc_evaluator)
192 .dataset
193 .n_events();
194 let local_projection = self.project_local(parameters, mc_evaluator);
195 let mut buffer: Vec<Float> = vec![0.0; n_events];
196 let (counts, displs) = world.get_counts_displs(n_events);
197 {
198 let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
199 world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
200 }
201 buffer
202 }
203
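/// Project the model onto the accepted Monte Carlo sample (or onto the dataset of the supplied
/// `mc_evaluator`), returning one weight per Monte Carlo event,
/// `weight * Re[L(parameters; event)] / n_accmc`, suitable for histogramming fit results against
/// data. When an MPI world is active the full projection is gathered across ranks; otherwise the
/// local path is used.
///
/// A short sketch (`best_fit` is assumed to be a slice of parameter values from a previous fit):
///
/// ```ignore
/// let mc_weights = nll.project(&best_fit, None);
/// ```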
204 pub fn project(&self, parameters: &[Float], mc_evaluator: Option<Evaluator>) -> Vec<Float> {
218 #[cfg(feature = "mpi")]
219 {
220 if let Some(world) = laddu_core::mpi::get_world() {
221 return self.project_mpi(parameters, mc_evaluator, &world);
222 }
223 }
224 self.project_local(parameters, mc_evaluator)
225 }
226
227 pub fn project_gradient_local(
236 &self,
237 parameters: &[Float],
238 mc_evaluator: Option<Evaluator>,
239 ) -> (Vec<Float>, Vec<DVector<Float>>) {
240 let (mc_dataset, result, result_gradient) = if let Some(mc_evaluator) = mc_evaluator {
241 (
242 mc_evaluator.dataset.clone(),
243 mc_evaluator.evaluate_local(parameters),
244 mc_evaluator.evaluate_gradient_local(parameters),
245 )
246 } else {
247 (
248 self.accmc_evaluator.dataset.clone(),
249 self.accmc_evaluator.evaluate_local(parameters),
250 self.accmc_evaluator.evaluate_gradient_local(parameters),
251 )
252 };
253 let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
254 #[cfg(feature = "rayon")]
255 {
256 (
257 result
258 .par_iter()
259 .zip(mc_dataset.events.par_iter())
260 .map(|(l, e)| e.weight * l.re / n_mc)
261 .collect(),
262 result_gradient
263 .par_iter()
264 .zip(mc_dataset.events.par_iter())
265 .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
266 .collect(),
267 )
268 }
269 #[cfg(not(feature = "rayon"))]
270 {
271 (
272 result
273 .iter()
274 .zip(mc_dataset.events.iter())
275 .map(|(l, e)| e.weight * l.re / n_mc)
276 .collect(),
277 result_gradient
278 .iter()
279 .zip(mc_dataset.events.iter())
280 .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
281 .collect(),
282 )
283 }
284 }
285
286 #[cfg(feature = "mpi")]
295 pub fn project_gradient_mpi(
296 &self,
297 parameters: &[Float],
298 mc_evaluator: Option<Evaluator>,
299 world: &SimpleCommunicator,
300 ) -> (Vec<Float>, Vec<DVector<Float>>) {
301 let n_events = mc_evaluator
302 .as_ref()
303 .unwrap_or(&self.accmc_evaluator)
304 .dataset
305 .n_events();
306 let (local_projection, local_gradient_projection) =
307 self.project_gradient_local(parameters, mc_evaluator);
308 let mut projection_result: Vec<Float> = vec![0.0; n_events];
309 let (counts, displs) = world.get_counts_displs(n_events);
310 {
311 let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
312 world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
313 }
314
315 let flattened_local_gradient_projection = local_gradient_projection
316 .iter()
317 .flat_map(|g| g.data.as_vec().to_vec())
318 .collect::<Vec<Float>>();
319 let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
320 let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
321 let mut partitioned_flattened_result_buffer =
322 PartitionMut::new(&mut flattened_result_buffer, counts, displs);
323 world.all_gather_varcount_into(
324 &flattened_local_gradient_projection,
325 &mut partitioned_flattened_result_buffer,
326 );
327 let gradient_projection_result = flattened_result_buffer
328 .chunks(parameters.len())
329 .map(DVector::from_row_slice)
330 .collect();
331 (projection_result, gradient_projection_result)
332 }
333 pub fn project_gradient(
347 &self,
348 parameters: &[Float],
349 mc_evaluator: Option<Evaluator>,
350 ) -> (Vec<Float>, Vec<DVector<Float>>) {
351 #[cfg(feature = "mpi")]
352 {
353 if let Some(world) = laddu_core::mpi::get_world() {
354 return self.project_gradient_mpi(parameters, mc_evaluator, &world);
355 }
356 }
357 self.project_gradient_local(parameters, mc_evaluator)
358 }
359
360 pub fn project_with_local<T: AsRef<str>>(
370 &self,
371 parameters: &[Float],
372 names: &[T],
373 mc_evaluator: Option<Evaluator>,
374 ) -> Result<Vec<Float>, LadduError> {
375 if let Some(mc_evaluator) = &mc_evaluator {
376 let current_active_mc = mc_evaluator.resources.read().active.clone();
377 mc_evaluator.isolate_many(names)?;
378 let mc_dataset = mc_evaluator.dataset.clone();
379 let result = mc_evaluator.evaluate_local(parameters);
380 let n_mc = self.accmc_evaluator.dataset.n_events();
381 #[cfg(feature = "rayon")]
382 let output: Vec<Float> = result
383 .par_iter()
384 .zip(mc_dataset.events.par_iter())
385 .map(|(l, e)| e.weight * l.re / n_mc as Float)
386 .collect();
387 #[cfg(not(feature = "rayon"))]
388 let output: Vec<Float> = result
389 .iter()
390 .zip(mc_dataset.events.iter())
391 .map(|(l, e)| e.weight * l.re / n_mc as Float)
392 .collect();
393 mc_evaluator.resources.write().active = current_active_mc;
394 Ok(output)
395 } else {
396 let current_active_data = self.data_evaluator.resources.read().active.clone();
397 let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
398 self.isolate_many(names)?;
399 let mc_dataset = &self.accmc_evaluator.dataset;
400 let result = self.accmc_evaluator.evaluate_local(parameters);
401 let n_mc = self.accmc_evaluator.dataset.n_events();
402 #[cfg(feature = "rayon")]
403 let output: Vec<Float> = result
404 .par_iter()
405 .zip(mc_dataset.events.par_iter())
406 .map(|(l, e)| e.weight * l.re / n_mc as Float)
407 .collect();
408 #[cfg(not(feature = "rayon"))]
409 let output: Vec<Float> = result
410 .iter()
411 .zip(mc_dataset.events.iter())
412 .map(|(l, e)| e.weight * l.re / n_mc as Float)
413 .collect();
414 self.data_evaluator.resources.write().active = current_active_data;
415 self.accmc_evaluator.resources.write().active = current_active_accmc;
416 Ok(output)
417 }
418 }
419
420 #[cfg(feature = "mpi")]
430 pub fn project_with_mpi<T: AsRef<str>>(
431 &self,
432 parameters: &[Float],
433 names: &[T],
434 mc_evaluator: Option<Evaluator>,
435 world: &SimpleCommunicator,
436 ) -> Result<Vec<Float>, LadduError> {
437 let n_events = mc_evaluator
438 .as_ref()
439 .unwrap_or(&self.accmc_evaluator)
440 .dataset
441 .n_events();
442 let local_projection = self.project_with_local(parameters, names, mc_evaluator)?;
443 let mut buffer: Vec<Float> = vec![0.0; n_events];
444 let (counts, displs) = world.get_counts_displs(n_events);
445 {
446 let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
447 world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
448 }
449 Ok(buffer)
450 }
451
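/// Same as [`NLL::project`], but the amplitudes named in `names` are temporarily isolated (all
/// other registered amplitudes are deactivated) and the previous activation state is restored
/// before returning. Useful for plotting individual waves or coherent subsets.
///
/// A short sketch ("S-wave" is a placeholder amplitude name assumed to be registered in the
/// model):
///
/// ```ignore
/// let s_wave_weights = nll.project_with(&best_fit, &["S-wave"], None)?;
/// ```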
452 pub fn project_with<T: AsRef<str>>(
469 &self,
470 parameters: &[Float],
471 names: &[T],
472 mc_evaluator: Option<Evaluator>,
473 ) -> Result<Vec<Float>, LadduError> {
474 #[cfg(feature = "mpi")]
475 {
476 if let Some(world) = laddu_core::mpi::get_world() {
477 return self.project_with_mpi(parameters, names, mc_evaluator, &world);
478 }
479 }
480 self.project_with_local(parameters, names, mc_evaluator)
481 }
482
483 pub fn project_gradient_with_local<T: AsRef<str>>(
494 &self,
495 parameters: &[Float],
496 names: &[T],
497 mc_evaluator: Option<Evaluator>,
498 ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
499 if let Some(mc_evaluator) = &mc_evaluator {
500 let current_active_mc = mc_evaluator.resources.read().active.clone();
501 mc_evaluator.isolate_many(names)?;
502 let mc_dataset = mc_evaluator.dataset.clone();
503 let result = mc_evaluator.evaluate_local(parameters);
504 let result_gradient = mc_evaluator.evaluate_gradient_local(parameters);
505 let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
506 #[cfg(feature = "rayon")]
507 let (res, res_gradient) = {
508 (
509 result
510 .par_iter()
511 .zip(mc_dataset.events.par_iter())
512 .map(|(l, e)| e.weight * l.re / n_mc)
513 .collect(),
514 result_gradient
515 .par_iter()
516 .zip(mc_dataset.events.par_iter())
517 .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
518 .collect(),
519 )
520 };
521 #[cfg(not(feature = "rayon"))]
522 let (res, res_gradient) = {
523 (
524 result
525 .iter()
526 .zip(mc_dataset.events.iter())
527 .map(|(l, e)| e.weight * l.re / n_mc)
528 .collect(),
529 result_gradient
530 .iter()
531 .zip(mc_dataset.events.iter())
532 .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
533 .collect(),
534 )
535 };
536 mc_evaluator.resources.write().active = current_active_mc;
537 Ok((res, res_gradient))
538 } else {
539 let current_active_data = self.data_evaluator.resources.read().active.clone();
540 let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
541 self.isolate_many(names)?;
542 let mc_dataset = &self.accmc_evaluator.dataset;
543 let result = self.accmc_evaluator.evaluate_local(parameters);
544 let result_gradient = self.accmc_evaluator.evaluate_gradient_local(parameters);
545 let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
546 #[cfg(feature = "rayon")]
547 let (res, res_gradient) = {
548 (
549 result
550 .par_iter()
551 .zip(mc_dataset.events.par_iter())
552 .map(|(l, e)| e.weight * l.re / n_mc)
553 .collect(),
554 result_gradient
555 .par_iter()
556 .zip(mc_dataset.events.par_iter())
557 .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
558 .collect(),
559 )
560 };
561 #[cfg(not(feature = "rayon"))]
562 let (res, res_gradient) = {
563 (
564 result
565 .iter()
566 .zip(mc_dataset.events.iter())
567 .map(|(l, e)| e.weight * l.re / n_mc)
568 .collect(),
569 result_gradient
570 .iter()
571 .zip(mc_dataset.events.iter())
572 .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
573 .collect(),
574 )
575 };
576 self.data_evaluator.resources.write().active = current_active_data;
577 self.accmc_evaluator.resources.write().active = current_active_accmc;
578 Ok((res, res_gradient))
579 }
580 }
581
582 #[cfg(feature = "mpi")]
593 pub fn project_gradient_with_mpi<T: AsRef<str>>(
594 &self,
595 parameters: &[Float],
596 names: &[T],
597 mc_evaluator: Option<Evaluator>,
598 world: &SimpleCommunicator,
599 ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
600 let n_events = mc_evaluator
601 .as_ref()
602 .unwrap_or(&self.accmc_evaluator)
603 .dataset
604 .n_events();
605 let (local_projection, local_gradient_projection) =
606 self.project_gradient_with_local(parameters, names, mc_evaluator)?;
607 let mut projection_result: Vec<Float> = vec![0.0; n_events];
608 let (counts, displs) = world.get_counts_displs(n_events);
609 {
610 let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
611 world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
612 }
613
614 let flattened_local_gradient_projection = local_gradient_projection
615 .iter()
616 .flat_map(|g| g.data.as_vec().to_vec())
617 .collect::<Vec<Float>>();
618 let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
619 let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
620 let mut partitioned_flattened_result_buffer =
621 PartitionMut::new(&mut flattened_result_buffer, counts, displs);
622 world.all_gather_varcount_into(
623 &flattened_local_gradient_projection,
624 &mut partitioned_flattened_result_buffer,
625 );
626 let gradient_projection_result = flattened_result_buffer
627 .chunks(parameters.len())
628 .map(DVector::from_row_slice)
629 .collect();
630 Ok((projection_result, gradient_projection_result))
631 }
632 pub fn project_gradient_with<T: AsRef<str>>(
651 &self,
652 parameters: &[Float],
653 names: &[T],
654 mc_evaluator: Option<Evaluator>,
655 ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
656 #[cfg(feature = "mpi")]
657 {
658 if let Some(world) = laddu_core::mpi::get_world() {
659 return self.project_gradient_with_mpi(parameters, names, mc_evaluator, &world);
660 }
661 }
662 self.project_gradient_with_local(parameters, names, mc_evaluator)
663 }
664
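/// Local (single-process) evaluation of the extended negative log-likelihood computed below,
///
/// NLL = -2 * ( sum_data w_i * ln Re L_i - (1 / n_accmc) * sum_accmc w_j * Re L_j ),
///
/// where both sums use Klein compensated accumulation to reduce floating-point error.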
665 fn evaluate_local(&self, parameters: &[Float]) -> Float {
666 let data_result = self.data_evaluator.evaluate_local(parameters);
667 let mc_result = self.accmc_evaluator.evaluate_local(parameters);
668 let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
669 #[cfg(feature = "rayon")]
670 let data_term: Float = data_result
671 .par_iter()
672 .zip(self.data_evaluator.dataset.events.par_iter())
673 .map(|(l, e)| e.weight * Float::ln(l.re))
674 .parallel_sum_with_accumulator::<Klein<Float>>();
675 #[cfg(feature = "rayon")]
676 let mc_term: Float = mc_result
677 .par_iter()
678 .zip(self.accmc_evaluator.dataset.events.par_iter())
679 .map(|(l, e)| e.weight * l.re)
680 .parallel_sum_with_accumulator::<Klein<Float>>();
681 #[cfg(not(feature = "rayon"))]
682 let data_term: Float = data_result
683 .iter()
684 .zip(self.data_evaluator.dataset.events.iter())
685 .map(|(l, e)| e.weight * Float::ln(l.re))
686 .sum_with_accumulator::<Klein<Float>>();
687 #[cfg(not(feature = "rayon"))]
688 let mc_term: Float = mc_result
689 .iter()
690 .zip(self.accmc_evaluator.dataset.events.iter())
691 .map(|(l, e)| e.weight * l.re)
692 .sum_with_accumulator::<Klein<Float>>();
693 -2.0 * (data_term - mc_term / n_mc)
694 }
695
696 #[cfg(feature = "mpi")]
697 fn evaluate_mpi(&self, parameters: &[Float], world: &SimpleCommunicator) -> Float {
698 let local_evaluation = self.evaluate_local(parameters);
699 let mut buffer: Vec<Float> = vec![0.0; world.size() as usize];
700 world.all_gather_into(&local_evaluation, &mut buffer);
701 buffer.iter().sum()
702 }
703
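/// Local gradient of the quantity computed by `evaluate_local`, assembled from the analytic
/// amplitude gradients:
///
/// grad NLL = -2 * ( sum_data w_i * Re(grad L_i) / Re(L_i)
///                   - (1 / n_accmc) * sum_accmc w_j * Re(grad L_j) ).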
704 fn evaluate_gradient_local(&self, parameters: &[Float]) -> DVector<Float> {
705 let data_resources = self.data_evaluator.resources.read();
706 let data_parameters = Parameters::new(parameters, &data_resources.constants);
707 let mc_resources = self.accmc_evaluator.resources.read();
708 let mc_parameters = Parameters::new(parameters, &mc_resources.constants);
709 let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
710 #[cfg(feature = "rayon")]
711 let data_term: DVector<Float> = self
712 .data_evaluator
713 .dataset
714 .events
715 .par_iter()
716 .zip(data_resources.caches.par_iter())
717 .map(|(event, cache)| {
718 let mut gradient_values =
719 vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
720 self.data_evaluator
721 .amplitudes
722 .iter()
723 .zip(data_resources.active.iter())
724 .zip(gradient_values.iter_mut())
725 .for_each(|((amp, active), grad)| {
726 if *active {
727 amp.compute_gradient(&data_parameters, event, cache, grad)
728 }
729 });
730 (
731 event.weight,
732 AmplitudeValues(
733 self.data_evaluator
734 .amplitudes
735 .iter()
736 .zip(data_resources.active.iter())
737 .map(|(amp, active)| {
738 if *active {
739 amp.compute(&data_parameters, event, cache)
740 } else {
741 Complex::ZERO
742 }
743 })
744 .collect(),
745 ),
746 GradientValues(parameters.len(), gradient_values),
747 )
748 })
749 .map(|(weight, amp_vals, grad_vals)| {
750 (
751 weight,
752 self.data_evaluator.expression.evaluate(&amp_vals),
753 self.data_evaluator
754 .expression
755 .evaluate_gradient(&amp_vals, &grad_vals),
756 )
757 })
758 .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
759 .collect::<Vec<DVector<Float>>>()
760 .iter()
761 .sum();
762 #[cfg(feature = "rayon")]
763 let mc_term: DVector<Float> = self
764 .accmc_evaluator
765 .dataset
766 .events
767 .par_iter()
768 .zip(mc_resources.caches.par_iter())
769 .map(|(event, cache)| {
770 let mut gradient_values =
771 vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
772 self.accmc_evaluator
773 .amplitudes
774 .iter()
775 .zip(mc_resources.active.iter())
776 .zip(gradient_values.iter_mut())
777 .for_each(|((amp, active), grad)| {
778 if *active {
779 amp.compute_gradient(&mc_parameters, event, cache, grad)
780 }
781 });
782 (
783 event.weight,
784 AmplitudeValues(
785 self.accmc_evaluator
786 .amplitudes
787 .iter()
788 .zip(mc_resources.active.iter())
789 .map(|(amp, active)| {
790 if *active {
791 amp.compute(&mc_parameters, event, cache)
792 } else {
793 Complex::ZERO
794 }
795 })
796 .collect(),
797 ),
798 GradientValues(parameters.len(), gradient_values),
799 )
800 })
801 .map(|(weight, amp_vals, grad_vals)| {
802 (
803 weight,
804 self.accmc_evaluator
805 .expression
806 .evaluate_gradient(&amp_vals, &grad_vals),
807 )
808 })
809 .map(|(w, g)| w * g.map(|gi| gi.re))
810 .collect::<Vec<DVector<Float>>>()
811 .iter()
812 .sum();
813 #[cfg(not(feature = "rayon"))]
814 let data_term: DVector<Float> = self
815 .data_evaluator
816 .dataset
817 .events
818 .iter()
819 .zip(data_resources.caches.iter())
820 .map(|(event, cache)| {
821 let mut gradient_values =
822 vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
823 self.data_evaluator
824 .amplitudes
825 .iter()
826 .zip(data_resources.active.iter())
827 .zip(gradient_values.iter_mut())
828 .for_each(|((amp, active), grad)| {
829 if *active {
830 amp.compute_gradient(&data_parameters, event, cache, grad)
831 }
832 });
833 (
834 event.weight,
835 AmplitudeValues(
836 self.data_evaluator
837 .amplitudes
838 .iter()
839 .zip(data_resources.active.iter())
840 .map(|(amp, active)| {
841 if *active {
842 amp.compute(&data_parameters, event, cache)
843 } else {
844 Complex::ZERO
845 }
846 })
847 .collect(),
848 ),
849 GradientValues(parameters.len(), gradient_values),
850 )
851 })
852 .map(|(weight, amp_vals, grad_vals)| {
853 (
854 weight,
855 self.data_evaluator.expression.evaluate(&amp_vals),
856 self.data_evaluator
857 .expression
858 .evaluate_gradient(&amp_vals, &grad_vals),
859 )
860 })
861 .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
862 .sum();
863 #[cfg(not(feature = "rayon"))]
864 let mc_term: DVector<Float> = self
865 .accmc_evaluator
866 .dataset
867 .events
868 .iter()
869 .zip(mc_resources.caches.iter())
870 .map(|(event, cache)| {
871 let mut gradient_values =
872 vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
873 self.accmc_evaluator
874 .amplitudes
875 .iter()
876 .zip(mc_resources.active.iter())
877 .zip(gradient_values.iter_mut())
878 .for_each(|((amp, active), grad)| {
879 if *active {
880 amp.compute_gradient(&mc_parameters, event, cache, grad)
881 }
882 });
883 (
884 event.weight,
885 AmplitudeValues(
886 self.accmc_evaluator
887 .amplitudes
888 .iter()
889 .zip(mc_resources.active.iter())
890 .map(|(amp, active)| {
891 if *active {
892 amp.compute(&mc_parameters, event, cache)
893 } else {
894 Complex::ZERO
895 }
896 })
897 .collect(),
898 ),
899 GradientValues(parameters.len(), gradient_values),
900 )
901 })
902 .map(|(weight, amp_vals, grad_vals)| {
903 (
904 weight,
905 self.accmc_evaluator
906 .expression
907 .evaluate_gradient(&amp_vals, &grad_vals),
908 )
909 })
910 .map(|(w, g)| w * g.map(|gi| gi.re))
911 .sum();
912 -2.0 * (data_term - mc_term / n_mc)
913 }
914
915 #[cfg(feature = "mpi")]
916 fn evaluate_gradient_mpi(
917 &self,
918 parameters: &[Float],
919 world: &SimpleCommunicator,
920 ) -> DVector<Float> {
921 let local_evaluation_vec = self
922 .evaluate_gradient_local(parameters)
923 .data
924 .as_vec()
925 .to_vec();
926 let mut flattened_result_buffer = vec![0.0; world.size() as usize * parameters.len()];
927 world.all_gather_into(&local_evaluation_vec, &mut flattened_result_buffer);
928 flattened_result_buffer
929 .chunks(parameters.len())
930 .map(DVector::from_row_slice)
931 .sum::<DVector<Float>>()
932 }
933}
934
935impl LikelihoodTerm for NLL {
936 fn parameters(&self) -> Vec<String> {
939 self.data_evaluator
940 .resources
941 .read()
942 .parameters
943 .iter()
944 .cloned()
945 .collect()
946 }
947
948 fn evaluate(&self, parameters: &[Float]) -> Float {
958 #[cfg(feature = "mpi")]
959 {
960 if let Some(world) = laddu_core::mpi::get_world() {
961 return self.evaluate_mpi(parameters, &world);
962 }
963 }
964 self.evaluate_local(parameters)
965 }
966
967 fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
973 #[cfg(feature = "mpi")]
974 {
975 if let Some(world) = laddu_core::mpi::get_world() {
976 return self.evaluate_gradient_mpi(parameters, &world);
977 }
978 }
979 self.evaluate_gradient_local(parameters)
980 }
981}
982
983#[cfg(feature = "rayon")]
984impl Function<ThreadPool, LadduError> for NLL {
985 fn evaluate(
986 &self,
987 parameters: &[Float],
988 thread_pool: &mut ThreadPool,
989 ) -> Result<Float, LadduError> {
990 Ok(thread_pool.install(|| LikelihoodTerm::evaluate(self, parameters)))
991 }
992 fn gradient(
993 &self,
994 parameters: &[Float],
995 thread_pool: &mut ThreadPool,
996 ) -> Result<DVector<Float>, LadduError> {
997 Ok(thread_pool.install(|| LikelihoodTerm::evaluate_gradient(self, parameters)))
998 }
999}
1000
1001#[cfg(not(feature = "rayon"))]
1002impl Function<(), LadduError> for NLL {
1003 fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
1004 Ok(LikelihoodTerm::evaluate(self, parameters))
1005 }
1006 fn gradient(
1007 &self,
1008 parameters: &[Float],
1009 _user_data: &mut (),
1010 ) -> Result<DVector<Float>, LadduError> {
1011 Ok(LikelihoodTerm::evaluate_gradient(self, parameters))
1012 }
1013}
1014
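/// Thin adapter that negates an [`NLL`], so samplers, which maximize a log-probability, can
/// reuse the same machinery (see [`NLL::mcmc`]).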
1015pub(crate) struct LogLikelihood<'a>(&'a NLL);
1016
1017#[cfg(feature = "rayon")]
1018impl Function<ThreadPool, LadduError> for LogLikelihood<'_> {
1019 fn evaluate(
1020 &self,
1021 parameters: &[Float],
1022 thread_pool: &mut ThreadPool,
1023 ) -> Result<Float, LadduError> {
1024 Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
1025 }
1026 fn gradient(
1027 &self,
1028 parameters: &[Float],
1029 thread_pool: &mut ThreadPool,
1030 ) -> Result<DVector<Float>, LadduError> {
1031 Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
1032 }
1033}
1034
1035#[cfg(not(feature = "rayon"))]
1036impl Function<(), LadduError> for LogLikelihood<'_> {
1037 fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
1038 Function::evaluate(self.0, parameters, user_data).map(|res| -res)
1039 }
1040 fn gradient(
1041 &self,
1042 parameters: &[Float],
1043 user_data: &mut (),
1044 ) -> Result<DVector<Float>, LadduError> {
1045 Function::gradient(self.0, parameters, user_data).map(|res| -res)
1046 }
1047}
1048
1049impl NLL {
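/// Minimize this NLL with respect to its free parameters, starting from `p0`.
///
/// A minimal sketch (two free parameters; the bound values are illustrative and default
/// [`MinimizerOptions`] are used):
///
/// ```ignore
/// let status = nll.minimize(
///     &[0.0, 1.0],
///     Some(vec![(-10.0, 10.0), (0.0, 5.0)]), // optional parameter bounds
///     None,                                  // default MinimizerOptions
/// )?;
/// ```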
1050 pub fn minimize(
1053 &self,
1054 p0: &[Float],
1055 bounds: Option<Vec<(Float, Float)>>,
1056 options: Option<MinimizerOptions>,
1057 ) -> Result<Status, LadduError> {
1058 let options = options.unwrap_or_default();
1059 let mut m = Minimizer::new(options.algorithm, self.parameters().len())
1060 .with_max_steps(options.max_steps);
1061 if let Some(bounds) = bounds {
1062 m = m.with_bounds(bounds);
1063 }
1064 for observer in options.observers {
1065 m = m.with_observer(observer);
1066 }
1067 #[cfg(feature = "rayon")]
1068 {
1069 m.minimize(
1070 self,
1071 p0,
1072 &mut ThreadPoolBuilder::new()
1073 .num_threads(options.threads)
1074 .build()
1075 .map_err(LadduError::from)?,
1076 )?;
1077 }
1078 #[cfg(not(feature = "rayon"))]
1079 {
1080 m.minimize(self, p0, &mut ())?;
1081 }
1082 Ok(m.status)
1083 }
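/// Run an MCMC sampler over the negated NLL, starting from the walker positions in `p0`. When
/// `options` is `None`, an Ensemble Slice Sampler with 90% differential and 10% Gaussian moves
/// is used.
///
/// A minimal sketch (the walker positions and walker count are illustrative only):
///
/// ```ignore
/// use fastrand::Rng;
/// let mut rng = Rng::new();
/// rng.seed(0);
/// let walkers = vec![
///     DVector::from_vec(vec![0.0, 1.0]),
///     DVector::from_vec(vec![0.1, 0.9]),
///     DVector::from_vec(vec![-0.1, 1.1]),
/// ];
/// let ensemble = nll.mcmc(&walkers, 1000, None, rng)?;
/// ```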
1084 pub fn mcmc<T: AsRef<[DVector<Float>]>>(
1086 &self,
1087 p0: T,
1088 n_steps: usize,
1089 options: Option<MCMCOptions>,
1090 rng: Rng,
1091 ) -> Result<Ensemble, LadduError> {
1092 let options = options.unwrap_or(MCMCOptions::new_ess(
1093 [ESSMove::differential(0.9), ESSMove::gaussian(0.1)],
1094 rng,
1095 ));
1096 let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
1097 for observer in options.observers {
1098 m = m.with_observer(observer);
1099 }
1100 let func = LogLikelihood(self);
1101 #[cfg(feature = "rayon")]
1102 {
1103 m.sample(
1104 &func,
1105 &mut ThreadPoolBuilder::new()
1106 .num_threads(options.threads)
1107 .build()
1108 .map_err(LadduError::from)?,
1109 n_steps,
1110 )?;
1111 }
1112 #[cfg(not(feature = "rayon"))]
1113 {
1114 m.sample(&func, &mut (), n_steps)?;
1115 }
1116 Ok(m.ensemble)
1117 }
1118}
1119
1120#[cfg(feature = "python")]
1132#[pyclass(name = "NLL", module = "laddu")]
1133#[derive(Clone)]
1134pub struct PyNLL(pub Box<NLL>);
1135
1136#[cfg(feature = "python")]
1137#[pymethods]
1138impl PyNLL {
1139 #[new]
1140 #[pyo3(signature = (model, ds_data, ds_accmc))]
1141 fn new(model: &PyModel, ds_data: &PyDataset, ds_accmc: &PyDataset) -> Self {
1142 Self(NLL::new(&model.0, &ds_data.0, &ds_accmc.0))
1143 }
1144 #[getter]
1151 fn data(&self) -> PyDataset {
1152 PyDataset(self.0.data_evaluator.dataset.clone())
1153 }
1154 #[getter]
1161 fn accmc(&self) -> PyDataset {
1162 PyDataset(self.0.accmc_evaluator.dataset.clone())
1163 }
1164 fn as_term(&self) -> PyLikelihoodTerm {
1172 PyLikelihoodTerm(self.0.clone())
1173 }
1174 #[getter]
1181 fn parameters(&self) -> Vec<String> {
1182 self.0.parameters()
1183 }
1184 fn activate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
1199 if let Ok(string_arg) = arg.extract::<String>() {
1200 self.0.activate(&string_arg)?;
1201 } else if let Ok(list_arg) = arg.downcast::<PyList>() {
1202 let vec: Vec<String> = list_arg.extract()?;
1203 self.0.activate_many(&vec)?;
1204 } else {
1205 return Err(PyTypeError::new_err(
1206 "Argument must be either a string or a list of strings",
1207 ));
1208 }
1209 Ok(())
1210 }
1211 fn activate_all(&self) {
1214 self.0.activate_all();
1215 }
1216 fn deactivate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
1233 if let Ok(string_arg) = arg.extract::<String>() {
1234 self.0.deactivate(&string_arg)?;
1235 } else if let Ok(list_arg) = arg.downcast::<PyList>() {
1236 let vec: Vec<String> = list_arg.extract()?;
1237 self.0.deactivate_many(&vec)?;
1238 } else {
1239 return Err(PyTypeError::new_err(
1240 "Argument must be either a string or a list of strings",
1241 ));
1242 }
1243 Ok(())
1244 }
1245 fn deactivate_all(&self) {
1248 self.0.deactivate_all();
1249 }
1250 fn isolate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
1267 if let Ok(string_arg) = arg.extract::<String>() {
1268 self.0.isolate(&string_arg)?;
1269 } else if let Ok(list_arg) = arg.downcast::<PyList>() {
1270 let vec: Vec<String> = list_arg.extract()?;
1271 self.0.isolate_many(&vec)?;
1272 } else {
1273 return Err(PyTypeError::new_err(
1274 "Argument must be either a string or a list of strings",
1275 ));
1276 }
1277 Ok(())
1278 }
1279 #[pyo3(signature = (parameters, *, threads=None))]
1303 fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
1304 #[cfg(feature = "rayon")]
1305 {
1306 Ok(ThreadPoolBuilder::new()
1307 .num_threads(threads.unwrap_or_else(num_cpus::get))
1308 .build()
1309 .map_err(LadduError::from)?
1310 .install(|| LikelihoodTerm::evaluate(self.0.as_ref(), &parameters)))
1311 }
1312 #[cfg(not(feature = "rayon"))]
1313 {
1314 Ok(LikelihoodTerm::evaluate(self.0.as_ref(), &parameters))
1315 }
1316 }
1317 #[pyo3(signature = (parameters, *, threads=None))]
1338 fn evaluate_gradient<'py>(
1339 &self,
1340 py: Python<'py>,
1341 parameters: Vec<Float>,
1342 threads: Option<usize>,
1343 ) -> PyResult<Bound<'py, PyArray1<Float>>> {
1344 #[cfg(feature = "rayon")]
1345 {
1346 Ok(PyArray1::from_slice(
1347 py,
1348 ThreadPoolBuilder::new()
1349 .num_threads(threads.unwrap_or_else(num_cpus::get))
1350 .build()
1351 .map_err(LadduError::from)?
1352 .install(|| self.0.evaluate_gradient(&parameters))
1353 .as_slice(),
1354 ))
1355 }
1356 #[cfg(not(feature = "rayon"))]
1357 {
1358 Ok(PyArray1::from_slice(
1359 py,
1360 self.0.evaluate_gradient(&parameters).as_slice(),
1361 ))
1362 }
1363 }
1364 #[pyo3(signature = (parameters, *, mc_evaluator = None, threads=None))]
1391 fn project<'py>(
1392 &self,
1393 py: Python<'py>,
1394 parameters: Vec<Float>,
1395 mc_evaluator: Option<PyEvaluator>,
1396 threads: Option<usize>,
1397 ) -> PyResult<Bound<'py, PyArray1<Float>>> {
1398 #[cfg(feature = "rayon")]
1399 {
1400 Ok(PyArray1::from_slice(
1401 py,
1402 ThreadPoolBuilder::new()
1403 .num_threads(threads.unwrap_or_else(num_cpus::get))
1404 .build()
1405 .map_err(LadduError::from)?
1406 .install(|| {
1407 self.0
1408 .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
1409 })
1410 .as_slice(),
1411 ))
1412 }
1413 #[cfg(not(feature = "rayon"))]
1414 {
1415 Ok(PyArray1::from_slice(
1416 py,
1417 self.0
1418 .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
1419 .as_slice(),
1420 ))
1421 }
1422 }
1423
1424 #[pyo3(signature = (parameters, arg, *, mc_evaluator = None, threads=None))]
1462 fn project_with<'py>(
1463 &self,
1464 py: Python<'py>,
1465 parameters: Vec<Float>,
1466 arg: &Bound<'_, PyAny>,
1467 mc_evaluator: Option<PyEvaluator>,
1468 threads: Option<usize>,
1469 ) -> PyResult<Bound<'py, PyArray1<Float>>> {
1470 let names = if let Ok(string_arg) = arg.extract::<String>() {
1471 vec![string_arg]
1472 } else if let Ok(list_arg) = arg.downcast::<PyList>() {
1473 let vec: Vec<String> = list_arg.extract()?;
1474 vec
1475 } else {
1476 return Err(PyTypeError::new_err(
1477 "Argument must be either a string or a list of strings",
1478 ));
1479 };
1480 #[cfg(feature = "rayon")]
1481 {
1482 Ok(PyArray1::from_slice(
1483 py,
1484 ThreadPoolBuilder::new()
1485 .num_threads(threads.unwrap_or_else(num_cpus::get))
1486 .build()
1487 .map_err(LadduError::from)?
1488 .install(|| {
1489 self.0.project_with(
1490 &parameters,
1491 &names,
1492 mc_evaluator.map(|pyeval| pyeval.0.clone()),
1493 )
1494 })?
1495 .as_slice(),
1496 ))
1497 }
1498 #[cfg(not(feature = "rayon"))]
1499 {
1500 Ok(PyArray1::from_slice(
1501 py,
1502 self.0
1503 .project_with(
1504 &parameters,
1505 &names,
1506 mc_evaluator.map(|pyeval| pyeval.0.clone()),
1507 )?
1508 .as_slice(),
1509 ))
1510 }
1511 }
1512
1513 #[pyo3(signature = (p0, *, bounds=None, method="lbfgsb", max_steps=4000, debug=false, verbose=false, **kwargs))]
1593 #[allow(clippy::too_many_arguments)]
1594 fn minimize(
1595 &self,
1596 p0: Vec<Float>,
1597 bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
1598 method: &str,
1599 max_steps: usize,
1600 debug: bool,
1601 verbose: bool,
1602 kwargs: Option<&Bound<'_, PyDict>>,
1603 ) -> PyResult<PyStatus> {
1604 let bounds = bounds.map(|bounds_vec| {
1605 bounds_vec
1606 .iter()
1607 .map(|(opt_lb, opt_ub)| {
1608 (
1609 opt_lb.unwrap_or(Float::NEG_INFINITY),
1610 opt_ub.unwrap_or(Float::INFINITY),
1611 )
1612 })
1613 .collect()
1614 });
1615 let n_parameters = p0.len();
1616 let options =
1617 py_parse_minimizer_options(n_parameters, method, max_steps, debug, verbose, kwargs)?;
1618 let status = self.0.minimize(&p0, bounds, Some(options))?;
1619 Ok(PyStatus(status))
1620 }
1621 #[pyo3(signature = (p0, n_steps, *, method="ESS", debug=false, verbose=false, seed=0, **kwargs))]
1686 #[allow(clippy::too_many_arguments)]
1687 fn mcmc(
1688 &self,
1689 p0: Vec<Vec<Float>>,
1690 n_steps: usize,
1691 method: &str,
1692 debug: bool,
1693 verbose: bool,
1694 seed: u64,
1695 kwargs: Option<&Bound<'_, PyDict>>,
1696 ) -> PyResult<PyEnsemble> {
1697 let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
1698 let mut rng = Rng::new();
1699 rng.seed(seed);
1700 let options = py_parse_mcmc_options(method, debug, verbose, kwargs, rng.clone())?;
1701 let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
1702 Ok(PyEnsemble(ensemble))
1703 }
1704}
1705
1706#[derive(Clone, Debug)]
1709pub struct LikelihoodID(usize, Option<String>);
1710
1711impl Display for LikelihoodID {
1712 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1713 if let Some(name) = &self.1 {
1714 write!(f, "{}({})", name, self.0)
1715 } else {
1716 write!(f, "({})", self.0)
1717 }
1718 }
1719}
1720
1721#[cfg(feature = "python")]
1728#[pyclass(name = "LikelihoodID", module = "laddu")]
1729#[derive(Clone)]
1730pub struct PyLikelihoodID(LikelihoodID);
1731
1732#[cfg(feature = "python")]
1733#[pymethods]
1734impl PyLikelihoodID {
1735 fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
1736 if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
1737 Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
1738 } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
1739 Ok(PyLikelihoodExpression(
1740 self.0.clone() + other_expr.0.clone(),
1741 ))
1742 } else if let Ok(int) = other.extract::<usize>() {
1743 if int == 0 {
1744 Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
1745 self.0.clone(),
1746 )))
1747 } else {
1748 Err(PyTypeError::new_err(
1749 "Addition with an integer for this type is only defined for 0",
1750 ))
1751 }
1752 } else {
1753 Err(PyTypeError::new_err("Unsupported operand type for +"))
1754 }
1755 }
1756 fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
1757 if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
1758 Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
1759 } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
1760 Ok(PyLikelihoodExpression(
1761 other_expr.0.clone() + self.0.clone(),
1762 ))
1763 } else if let Ok(int) = other.extract::<usize>() {
1764 if int == 0 {
1765 Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
1766 self.0.clone(),
1767 )))
1768 } else {
1769 Err(PyTypeError::new_err(
1770 "Addition with an integer for this type is only defined for 0",
1771 ))
1772 }
1773 } else {
1774 Err(PyTypeError::new_err("Unsupported operand type for +"))
1775 }
1776 }
1777 fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
1778 if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
1779 Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
1780 } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
1781 Ok(PyLikelihoodExpression(
1782 self.0.clone() * other_expr.0.clone(),
1783 ))
1784 } else {
1785 Err(PyTypeError::new_err("Unsupported operand type for *"))
1786 }
1787 }
1788 fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
1789 if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
1790 Ok(PyLikelihoodExpression(other_aid.0.clone() * self.0.clone()))
1791 } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
1792 Ok(PyLikelihoodExpression(
1793 other_expr.0.clone() * self.0.clone(),
1794 ))
1795 } else {
1796 Err(PyTypeError::new_err("Unsupported operand type for *"))
1797 }
1798 }
1799 fn __str__(&self) -> String {
1800 format!("{}", self.0)
1801 }
1802 fn __repr__(&self) -> String {
1803 format!("{:?}", self.0)
1804 }
1805}
1806
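/// Collects [`LikelihoodTerm`]s and merges their parameters by name: parameters that share a
/// name across terms become a single free parameter in the combined fit. `register` returns a
/// [`LikelihoodID`] which can be combined into a [`LikelihoodExpression`] and loaded into a
/// [`LikelihoodEvaluator`].
///
/// A combined-fit sketch (the models and datasets are assumed to exist; any identically named
/// parameters of the two NLLs are shared):
///
/// ```ignore
/// let mut manager = LikelihoodManager::default();
/// let nll_a = manager.register(NLL::new(&model_a, &data_a, &accmc_a));
/// let nll_b = manager.register(NLL::new(&model_b, &data_b, &accmc_b));
/// let expression = &nll_a + &nll_b;
/// let evaluator = manager.load(&expression);
/// let status = evaluator.minimize(&vec![0.0; evaluator.parameters().len()], None, None)?;
/// ```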
1807#[derive(Default, Clone)]
1809pub struct LikelihoodManager {
1810 terms: Vec<Box<dyn LikelihoodTerm>>,
1811 param_name_to_index: HashMap<String, usize>,
1812 param_names: Vec<String>,
1813 param_layouts: Vec<Vec<usize>>,
1814 param_counts: Vec<usize>,
1815}
1816
1817impl LikelihoodManager {
1818 pub fn register(&mut self, term: Box<dyn LikelihoodTerm>) -> LikelihoodID {
1821 let term_idx = self.terms.len();
1822 for param_name in term.parameters() {
1823 if !self.param_name_to_index.contains_key(&param_name) {
1824 self.param_name_to_index
1825 .insert(param_name.clone(), self.param_name_to_index.len());
1826 self.param_names.push(param_name.clone());
1827 }
1828 }
1829 let param_layout: Vec<usize> = term
1830 .parameters()
1831 .iter()
1832 .map(|name| self.param_name_to_index[name])
1833 .collect();
1834 let param_count = term.parameters().len();
1835 self.param_layouts.push(param_layout);
1836 self.param_counts.push(param_count);
1837 self.terms.push(term.clone());
1838
1839 LikelihoodID(term_idx, None)
1840 }
1841
1842 pub fn register_with_name<T: AsRef<str>>(
1845 &mut self,
1846 term: Box<dyn LikelihoodTerm>,
1847 name: T,
1848 ) -> LikelihoodID {
1849 let term_idx = self.terms.len();
1850 for param_name in term.parameters() {
1851 if !self.param_name_to_index.contains_key(&param_name) {
1852 self.param_name_to_index
1853 .insert(param_name.clone(), self.param_name_to_index.len());
1854 self.param_names.push(param_name.clone());
1855 }
1856 }
1857 let param_layout: Vec<usize> = term
1858 .parameters()
1859 .iter()
1860 .map(|name| self.param_name_to_index[name])
1861 .collect();
1862 let param_count = term.parameters().len();
1863 self.param_layouts.push(param_layout);
1864 self.param_counts.push(param_count);
1865 self.terms.push(term.clone());
1866
1867 LikelihoodID(term_idx, Some(name.as_ref().to_string().clone()))
1868 }
1869
1870 pub fn parameters(&self) -> Vec<String> {
1874 self.param_names.clone()
1875 }
1876
1877 pub fn load(&self, likelihood_expression: &LikelihoodExpression) -> LikelihoodEvaluator {
1880 LikelihoodEvaluator {
1881 likelihood_manager: self.clone(),
1882 likelihood_expression: likelihood_expression.clone(),
1883 }
1884 }
1885}
1886
1887#[cfg(feature = "python")]
1890#[pyclass(name = "LikelihoodManager", module = "laddu")]
1891#[derive(Clone)]
1892pub struct PyLikelihoodManager(LikelihoodManager);
1893
1894#[cfg(feature = "python")]
1895#[pymethods]
1896impl PyLikelihoodManager {
1897 #[new]
1898 fn new() -> Self {
1899 Self(LikelihoodManager::default())
1900 }
1901 #[pyo3(signature = (likelihood_term, *, name = None))]
1915 fn register(
1916 &mut self,
1917 likelihood_term: &PyLikelihoodTerm,
1918 name: Option<String>,
1919 ) -> PyLikelihoodID {
1920 if let Some(name) = name {
1921 PyLikelihoodID(self.0.register_with_name(likelihood_term.0.clone(), name))
1922 } else {
1923 PyLikelihoodID(self.0.register(likelihood_term.0.clone()))
1924 }
1925 }
1926 fn parameters(&self) -> Vec<String> {
1934 self.0.parameters()
1935 }
1936 fn load(&self, likelihood_expression: &PyLikelihoodExpression) -> PyLikelihoodEvaluator {
1961 PyLikelihoodEvaluator(self.0.load(&likelihood_expression.0))
1962 }
1963}
1964
1965#[derive(Debug)]
1966struct LikelihoodValues(Vec<Float>);
1967
1968#[derive(Debug)]
1969struct LikelihoodGradients(Vec<DVector<Float>>);
1970
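/// A tree of registered likelihood terms combined with `+` and `*`. `Zero` and `One` are the
/// additive and multiplicative identities (used, for example, for empty sums and products), and
/// the gradient of a product follows the usual product rule.
///
/// A short sketch (`nll_id`, `scale_id`, and `prior_id` are assumed to be [`LikelihoodID`]s
/// obtained from a [`LikelihoodManager`]):
///
/// ```ignore
/// let expression = &scale_id * &nll_id + &prior_id;
/// println!("{}", expression); // renders the expression tree
/// ```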
1971#[derive(Clone, Default)]
1973pub enum LikelihoodExpression {
1974 #[default]
1976 Zero,
1977 One,
1979 Term(LikelihoodID),
1981 Add(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
1983 Mul(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
1985}
1986
1987impl Debug for LikelihoodExpression {
1988 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1989 self.write_tree(f, "", "", "")
1990 }
1991}
1992
1993impl Display for LikelihoodExpression {
1994 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1995 write!(f, "{:?}", self)
1996 }
1997}
1998
1999impl LikelihoodExpression {
2000 fn evaluate(&self, likelihood_values: &LikelihoodValues) -> Float {
2001 match self {
2002 LikelihoodExpression::Zero => 0.0,
2003 LikelihoodExpression::One => 1.0,
2004 LikelihoodExpression::Term(lid) => likelihood_values.0[lid.0],
2005 LikelihoodExpression::Add(a, b) => {
2006 a.evaluate(likelihood_values) + b.evaluate(likelihood_values)
2007 }
2008 LikelihoodExpression::Mul(a, b) => {
2009 a.evaluate(likelihood_values) * b.evaluate(likelihood_values)
2010 }
2011 }
2012 }
2013 fn evaluate_gradient(
2014 &self,
2015 likelihood_values: &LikelihoodValues,
2016 likelihood_gradients: &LikelihoodGradients,
2017 ) -> DVector<Float> {
2018 match self {
2019 LikelihoodExpression::Zero => DVector::zeros(0),
2020 LikelihoodExpression::One => DVector::zeros(0),
2021 LikelihoodExpression::Term(lid) => likelihood_gradients.0[lid.0].clone(),
2022 LikelihoodExpression::Add(a, b) => {
2023 a.evaluate_gradient(likelihood_values, likelihood_gradients)
2024 + b.evaluate_gradient(likelihood_values, likelihood_gradients)
2025 }
2026 LikelihoodExpression::Mul(a, b) => {
2027 a.evaluate_gradient(likelihood_values, likelihood_gradients)
2028 * b.evaluate(likelihood_values)
2029 + b.evaluate_gradient(likelihood_values, likelihood_gradients)
2030 * a.evaluate(likelihood_values)
2031 }
2032 }
2033 }
2034 fn write_tree(
2036 &self,
2037 f: &mut std::fmt::Formatter<'_>,
2038 parent_prefix: &str,
2039 immediate_prefix: &str,
2040 parent_suffix: &str,
2041 ) -> std::fmt::Result {
2042 let display_string = match self {
2043 Self::Zero => "0".to_string(),
2044 Self::One => "1".to_string(),
2045 Self::Term(lid) => {
2046 format!("{}", lid.0)
2047 }
2048 Self::Add(_, _) => "+".to_string(),
2049 Self::Mul(_, _) => "*".to_string(),
2050 };
2051 writeln!(f, "{}{}{}", parent_prefix, immediate_prefix, display_string)?;
2052 match self {
2053 Self::Term(_) | Self::Zero | Self::One => {}
2054 Self::Add(a, b) | Self::Mul(a, b) => {
2055 let terms = [a, b];
2056 let mut it = terms.iter().peekable();
2057 let child_prefix = format!("{}{}", parent_prefix, parent_suffix);
2058 while let Some(child) = it.next() {
2059 match it.peek() {
2060 Some(_) => child.write_tree(f, &child_prefix, "├─ ", "│ "),
2061 None => child.write_tree(f, &child_prefix, "└─ ", " "),
2062 }?;
2063 }
2064 }
2065 }
2066 Ok(())
2067 }
2068}
2069
2070impl_op_ex!(+ |a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(a.clone()), Box::new(b.clone()))});
2071impl_op_ex!(
2072 *|a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression {
2073 LikelihoodExpression::Mul(Box::new(a.clone()), Box::new(b.clone()))
2074 }
2075);
2076impl_op_ex_commutative!(+ |a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(b.clone()))});
2077impl_op_ex_commutative!(
2078 *|a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression {
2079 LikelihoodExpression::Mul(
2080 Box::new(LikelihoodExpression::Term(a.clone())),
2081 Box::new(b.clone()),
2082 )
2083 }
2084);
2085impl_op_ex!(+ |a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(LikelihoodExpression::Term(b.clone())))});
2086impl_op_ex!(
2087 *|a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression {
2088 LikelihoodExpression::Mul(
2089 Box::new(LikelihoodExpression::Term(a.clone())),
2090 Box::new(LikelihoodExpression::Term(b.clone())),
2091 )
2092 }
2093);
2094
2095#[cfg(feature = "python")]
2098#[pyclass(name = "LikelihoodExpression", module = "laddu")]
2099#[derive(Clone)]
2100pub struct PyLikelihoodExpression(LikelihoodExpression);
2101
2102#[cfg(feature = "python")]
2105#[pyfunction(name = "likelihood_sum")]
2106pub fn py_likelihood_sum(terms: Vec<Bound<'_, PyAny>>) -> PyResult<PyLikelihoodExpression> {
2107 if terms.is_empty() {
2108 return Ok(PyLikelihoodExpression(LikelihoodExpression::Zero));
2109 }
2110 if terms.len() == 1 {
2111 let term = &terms[0];
2112 if let Ok(expression) = term.extract::<PyLikelihoodExpression>() {
2113 return Ok(expression);
2114 }
2115 if let Ok(py_amplitude_id) = term.extract::<PyLikelihoodID>() {
2116 return Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
2117 py_amplitude_id.0,
2118 )));
2119 }
2120 return Err(PyTypeError::new_err(
2121 "Item is neither a PyLikelihoodExpression nor a PyLikelihoodID",
2122 ));
2123 }
2124 let mut iter = terms.iter();
2125 let Some(first_term) = iter.next() else {
2126 return Ok(PyLikelihoodExpression(LikelihoodExpression::Zero));
2127 };
2128 if let Ok(first_expression) = first_term.extract::<PyLikelihoodExpression>() {
2129 let mut summation = first_expression.clone();
2130 for term in iter {
2131 summation = summation.__add__(term)?;
2132 }
2133 return Ok(summation);
2134 }
2135 if let Ok(first_likelihood_id) = first_term.extract::<PyLikelihoodID>() {
2136 let mut summation =
2137 PyLikelihoodExpression(LikelihoodExpression::Term(first_likelihood_id.0));
2138 for term in iter {
2139 summation = summation.__add__(term)?;
2140 }
2141 return Ok(summation);
2142 }
2143 Err(PyTypeError::new_err(
2144 "Elements must be PyLikelihoodExpression or PyLikelihoodID",
2145 ))
2146}
2147
2148#[cfg(feature = "python")]
2151#[pyfunction(name = "likelihood_product")]
2152pub fn py_likelihood_product(terms: Vec<Bound<'_, PyAny>>) -> PyResult<PyLikelihoodExpression> {
2153 if terms.is_empty() {
2154 return Ok(PyLikelihoodExpression(LikelihoodExpression::One));
2155 }
2156 if terms.len() == 1 {
2157 let term = &terms[0];
2158 if let Ok(expression) = term.extract::<PyLikelihoodExpression>() {
2159 return Ok(expression);
2160 }
2161 if let Ok(py_amplitude_id) = term.extract::<PyLikelihoodID>() {
2162 return Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
2163 py_amplitude_id.0,
2164 )));
2165 }
2166 return Err(PyTypeError::new_err(
2167 "Item is neither a PyLikelihoodExpression nor a PyLikelihoodID",
2168 ));
2169 }
2170 let mut iter = terms.iter();
2171 let Some(first_term) = iter.next() else {
2172 return Ok(PyLikelihoodExpression(LikelihoodExpression::One));
2173 };
2174 if let Ok(first_expression) = first_term.extract::<PyLikelihoodExpression>() {
2175 let mut product = first_expression.clone();
2176 for term in iter {
2177 product = product.__mul__(term)?;
2178 }
2179 return Ok(product);
2180 }
2181 if let Ok(first_likelihood_id) = first_term.extract::<PyLikelihoodID>() {
2182 let mut product = PyLikelihoodExpression(LikelihoodExpression::Term(first_likelihood_id.0));
2183 for term in iter {
2184 product = product.__mul__(term)?;
2185 }
2186 return Ok(product);
2187 }
2188 Err(PyTypeError::new_err(
2189 "Elements must be PyLikelihoodExpression or PyLikelihoodID",
2190 ))
2191}
2192
2193#[cfg(feature = "python")]
2196#[pyfunction(name = "LikelihoodZero")]
2197pub fn py_likelihood_zero() -> PyLikelihoodExpression {
2198 PyLikelihoodExpression(LikelihoodExpression::Zero)
2199}
2200
2201#[cfg(feature = "python")]
2204#[pyfunction(name = "LikelihoodOne")]
2205pub fn py_likelihood_one() -> PyLikelihoodExpression {
2206 PyLikelihoodExpression(LikelihoodExpression::One)
2207}
2208
2209#[cfg(feature = "python")]
2210#[pymethods]
2211impl PyLikelihoodExpression {
2212 fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
2213 if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
2214 Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
2215 } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
2216 Ok(PyLikelihoodExpression(
2217 self.0.clone() + other_expr.0.clone(),
2218 ))
2219 } else if let Ok(int) = other.extract::<usize>() {
2220 if int == 0 {
2221 Ok(PyLikelihoodExpression(self.0.clone()))
2222 } else {
2223 Err(PyTypeError::new_err(
2224 "Addition with an integer for this type is only defined for 0",
2225 ))
2226 }
2227 } else {
2228 Err(PyTypeError::new_err("Unsupported operand type for +"))
2229 }
2230 }
2231 fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
2232 if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
2233 Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
2234 } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
2235 Ok(PyLikelihoodExpression(
2236 other_expr.0.clone() + self.0.clone(),
2237 ))
2238 } else if let Ok(int) = other.extract::<usize>() {
2239 if int == 0 {
2240 Ok(PyLikelihoodExpression(self.0.clone()))
2241 } else {
2242 Err(PyTypeError::new_err(
2243 "Addition with an integer for this type is only defined for 0",
2244 ))
2245 }
2246 } else {
2247 Err(PyTypeError::new_err("Unsupported operand type for +"))
2248 }
2249 }
2250 fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
2251 if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
2252 Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
2253 } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
2254 Ok(PyLikelihoodExpression(
2255 self.0.clone() * other_expr.0.clone(),
2256 ))
2257 } else {
2258 Err(PyTypeError::new_err("Unsupported operand type for *"))
2259 }
2260 }
2261 fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
2262 if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
2263 Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
2264 } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
2265 Ok(PyLikelihoodExpression(
2266 self.0.clone() * other_expr.0.clone(),
2267 ))
2268 } else {
2269 Err(PyTypeError::new_err("Unsupported operand type for *"))
2270 }
2271 }
2272 fn __str__(&self) -> String {
2273 format!("{}", self.0)
2274 }
2275 fn __repr__(&self) -> String {
2276 format!("{:?}", self.0)
2277 }
2278}
2279
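/// Evaluates a [`LikelihoodExpression`] over the terms registered in a [`LikelihoodManager`].
/// On each call, the global parameter vector is scattered into per-term buffers using the
/// manager's layouts, each term is evaluated on its own slice, and term gradients are gathered
/// back into full-length vectors. For example, if term A has parameters ["a", "b"] and term B
/// has ["b", "c"], the combined parameter order is ["a", "b", "c"] and the layouts are [0, 1]
/// and [1, 2] respectively.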
2280pub struct LikelihoodEvaluator {
2282 likelihood_manager: LikelihoodManager,
2283 likelihood_expression: LikelihoodExpression,
2284}
2285
2286#[cfg(feature = "rayon")]
2287impl Function<ThreadPool, LadduError> for LikelihoodEvaluator {
2288 fn evaluate(
2289 &self,
2290 parameters: &[Float],
2291 thread_pool: &mut ThreadPool,
2292 ) -> Result<Float, LadduError> {
2293 thread_pool.install(|| self.evaluate(parameters))
2294 }
2295 fn gradient(
2296 &self,
2297 parameters: &[Float],
2298 thread_pool: &mut ThreadPool,
2299 ) -> Result<DVector<Float>, LadduError> {
2300 thread_pool.install(|| self.evaluate_gradient(parameters))
2301 }
2302}
2303
2304#[cfg(not(feature = "rayon"))]
2305impl Function<(), LadduError> for LikelihoodEvaluator {
2306 fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
2307 self.evaluate(parameters)
2308 }
2309 fn gradient(
2310 &self,
2311 parameters: &[Float],
2312 _user_data: &mut (),
2313 ) -> Result<DVector<Float>, LadduError> {
2314 self.evaluate_gradient(parameters)
2315 }
2316}
2317
2318pub(crate) struct NegativeLikelihoodEvaluator<'a>(&'a LikelihoodEvaluator);
2319#[cfg(feature = "rayon")]
2320impl Function<ThreadPool, LadduError> for NegativeLikelihoodEvaluator<'_> {
2321 fn evaluate(
2322 &self,
2323 parameters: &[Float],
2324 thread_pool: &mut ThreadPool,
2325 ) -> Result<Float, LadduError> {
2326 Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
2327 }
2328 fn gradient(
2329 &self,
2330 parameters: &[Float],
2331 thread_pool: &mut ThreadPool,
2332 ) -> Result<DVector<Float>, LadduError> {
2333 Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
2334 }
2335}
2336
2337#[cfg(not(feature = "rayon"))]
2338impl Function<(), LadduError> for NegativeLikelihoodEvaluator<'_> {
2339 fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
2340 Function::evaluate(self.0, parameters, user_data).map(|res| -res)
2341 }
2342 fn gradient(
2343 &self,
2344 parameters: &[Float],
2345 user_data: &mut (),
2346 ) -> Result<DVector<Float>, LadduError> {
2347 Function::gradient(self.0, parameters, user_data).map(|res| -res)
2348 }
2349}
2350
2351impl LikelihoodEvaluator {
2352 pub fn parameters(&self) -> Vec<String> {
2354 self.likelihood_manager.parameters()
2355 }
2356 pub fn evaluate(&self, parameters: &[Float]) -> Result<Float, LadduError> {
2359 let mut param_buffers: Vec<Vec<Float>> = self
2360 .likelihood_manager
2361 .param_counts
2362 .iter()
2363 .map(|&count| vec![0.0; count])
2364 .collect();
2365 for (layout, buffer) in self
2366 .likelihood_manager
2367 .param_layouts
2368 .iter()
2369 .zip(param_buffers.iter_mut())
2370 {
2371 for (buffer_idx, &param_idx) in layout.iter().enumerate() {
2372 buffer[buffer_idx] = parameters[param_idx];
2373 }
2374 }
2375 let likelihood_values = LikelihoodValues(
2376 self.likelihood_manager
2377 .terms
2378 .iter()
2379 .zip(param_buffers.iter())
2380 .map(|(term, buffer)| term.evaluate(buffer))
2381 .collect(),
2382 );
2383 Ok(self.likelihood_expression.evaluate(&likelihood_values))
2384 }
2385
2386 pub fn evaluate_gradient(&self, parameters: &[Float]) -> Result<DVector<Float>, LadduError> {
2389 let mut param_buffers: Vec<Vec<Float>> = self
2390 .likelihood_manager
2391 .param_counts
2392 .iter()
2393 .map(|&count| vec![0.0; count])
2394 .collect();
2395 for (layout, buffer) in self
2396 .likelihood_manager
2397 .param_layouts
2398 .iter()
2399 .zip(param_buffers.iter_mut())
2400 {
2401 for (buffer_idx, &param_idx) in layout.iter().enumerate() {
2402 buffer[buffer_idx] = parameters[param_idx];
2403 }
2404 }
2405 let likelihood_values = LikelihoodValues(
2406 self.likelihood_manager
2407 .terms
2408 .iter()
2409 .zip(param_buffers.iter())
2410 .map(|(term, buffer)| term.evaluate(buffer))
2411 .collect(),
2412 );
2413 let mut gradient_buffers: Vec<DVector<Float>> = (0..self.likelihood_manager.terms.len())
2414 .map(|_| DVector::zeros(self.likelihood_manager.param_names.len()))
2415 .collect();
2416 for (((term, param_buffer), gradient_buffer), layout) in self
2417 .likelihood_manager
2418 .terms
2419 .iter()
2420 .zip(param_buffers.iter())
2421 .zip(gradient_buffers.iter_mut())
2422 .zip(self.likelihood_manager.param_layouts.iter())
2423 {
2424 let term_gradient = term.evaluate_gradient(param_buffer);
2425 for (term_idx, &buffer_idx) in layout.iter().enumerate() {
2426 gradient_buffer[buffer_idx] = term_gradient[term_idx];
2427 }
2428 }
2429 let likelihood_gradients = LikelihoodGradients(gradient_buffers);
2430 Ok(self
2431 .likelihood_expression
2432 .evaluate_gradient(&likelihood_values, &likelihood_gradients))
2433 }
2434
2435 pub fn minimize(
2440 &self,
2441 p0: &[Float],
2442 bounds: Option<Vec<(Float, Float)>>,
2443 options: Option<MinimizerOptions>,
2444 ) -> Result<Status, LadduError> {
2445 let options = options.unwrap_or_default();
2446 let mut m = Minimizer::new(options.algorithm, self.parameters().len())
2447 .with_max_steps(options.max_steps);
2448 if let Some(bounds) = bounds {
2449 m = m.with_bounds(bounds);
2450 }
2451 for observer in options.observers {
2452 m = m.with_observer(observer)
2453 }
2454 #[cfg(feature = "rayon")]
2455 {
2456 m.minimize(
2457 self,
2458 p0,
2459 &mut ThreadPoolBuilder::new()
2460 .num_threads(options.threads)
2461 .build()
2462 .map_err(LadduError::from)?,
2463 )?;
2464 }
2465 #[cfg(not(feature = "rayon"))]
2466 {
2467 m.minimize(self, p0, &mut ())?;
2468 }
2469 Ok(m.status)
2470 }
2471
2472 pub fn mcmc<T: AsRef<[DVector<Float>]>>(
2478 &self,
2479 p0: T,
2480 n_steps: usize,
2481 options: Option<MCMCOptions>,
2482 rng: Rng,
2483 ) -> Result<Ensemble, LadduError> {
2484 let options = options.unwrap_or(MCMCOptions::new_ess(
2485 [ESSMove::differential(0.9), ESSMove::gaussian(0.1)],
2486 rng,
2487 ));
2488 let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
2489 for observer in options.observers {
2490 m = m.with_observer(observer);
2491 }
2492 let func = NegativeLikelihoodEvaluator(self);
2493 #[cfg(feature = "rayon")]
2494 {
2495 m.sample(
2496 &func,
2497 &mut ThreadPoolBuilder::new()
2498 .num_threads(options.threads)
2499 .build()
2500 .map_err(LadduError::from)?,
2501 n_steps,
2502 )?;
2503 }
2504 #[cfg(not(feature = "rayon"))]
2505 {
2506 m.sample(&func, &mut (), n_steps)?;
2507 }
2508 Ok(m.ensemble)
2509 }
2510}
2511
2512#[cfg(feature = "python")]
2516#[pyclass(name = "LikelihoodEvaluator", module = "laddu")]
2517pub struct PyLikelihoodEvaluator(LikelihoodEvaluator);
2518
2519#[cfg(feature = "python")]
2520#[pymethods]
2521impl PyLikelihoodEvaluator {
2522 #[getter]
2529 fn parameters(&self) -> Vec<String> {
2530 self.0.parameters()
2531 }
2532 #[pyo3(signature = (parameters, *, threads=None))]
2552 fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
2553 #[cfg(feature = "rayon")]
2554 {
2555 Ok(ThreadPoolBuilder::new()
2556 .num_threads(threads.unwrap_or_else(num_cpus::get))
2557 .build()
2558 .map_err(LadduError::from)?
2559 .install(|| self.0.evaluate(&parameters))?)
2560 }
2561 #[cfg(not(feature = "rayon"))]
2562 {
2563 Ok(self.0.evaluate(&parameters)?)
2564 }
2565 }
2566 #[pyo3(signature = (parameters, *, threads=None))]
2588 fn evaluate_gradient<'py>(
2589 &self,
2590 py: Python<'py>,
2591 parameters: Vec<Float>,
2592 threads: Option<usize>,
2593 ) -> PyResult<Bound<'py, PyArray1<Float>>> {
2594 #[cfg(feature = "rayon")]
2595 {
2596 Ok(PyArray1::from_slice(
2597 py,
2598 ThreadPoolBuilder::new()
2599 .num_threads(threads.unwrap_or_else(num_cpus::get))
2600 .build()
2601 .map_err(LadduError::from)?
2602 .install(|| self.0.evaluate_gradient(&parameters))?
2603 .as_slice(),
2604 ))
2605 }
2606 #[cfg(not(feature = "rayon"))]
2607 {
2608 Ok(PyArray1::from_slice(
2609 py,
2610 self.0.evaluate_gradient(&parameters)?.as_slice(),
2611 ))
2612 }
2613 }
2614
2615 #[pyo3(signature = (p0, *, bounds=None, method="lbfgsb", max_steps=4000, debug=false, verbose=false, **kwargs))]
2692 #[allow(clippy::too_many_arguments)]
2693 fn minimize(
2694 &self,
2695 p0: Vec<Float>,
2696 bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
2697 method: &str,
2698 max_steps: usize,
2699 debug: bool,
2700 verbose: bool,
2701 kwargs: Option<&Bound<'_, PyDict>>,
2702 ) -> PyResult<PyStatus> {
2703 let bounds = bounds.map(|bounds_vec| {
2704 bounds_vec
2705 .iter()
2706 .map(|(opt_lb, opt_ub)| {
2707 (
2708 opt_lb.unwrap_or(Float::NEG_INFINITY),
2709 opt_ub.unwrap_or(Float::INFINITY),
2710 )
2711 })
2712 .collect()
2713 });
2714 let n_parameters = p0.len();
2715 let options =
2716 py_parse_minimizer_options(n_parameters, method, max_steps, debug, verbose, kwargs)?;
2717 let status = self.0.minimize(&p0, bounds, Some(options))?;
2718 Ok(PyStatus(status))
2719 }
2720
2721 #[pyo3(signature = (p0, n_steps, *, method="ESS", debug=false, verbose=false, seed=0, **kwargs))]
2786 #[allow(clippy::too_many_arguments)]
2787 fn mcmc(
2788 &self,
2789 p0: Vec<Vec<Float>>,
2790 n_steps: usize,
2791 method: &str,
2792 debug: bool,
2793 verbose: bool,
2794 seed: u64,
2795 kwargs: Option<&Bound<'_, PyDict>>,
2796 ) -> PyResult<PyEnsemble> {
2797 let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
2798 let mut rng = Rng::new();
2799 rng.seed(seed);
2800 let options = py_parse_mcmc_options(method, debug, verbose, kwargs, rng.clone())?;
2801 let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
2802 Ok(PyEnsemble(ensemble))
2803 }
2804}
2805
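/// A [`LikelihoodTerm`] consisting of a single named free parameter whose value is returned
/// as-is; typically used as a multiplicative scale inside a [`LikelihoodExpression`].
///
/// A short sketch (`manager` and `nll_id` are assumed to come from a [`LikelihoodManager`]):
///
/// ```ignore
/// let scale = manager.register(LikelihoodScalar::new("scale"));
/// let expression = &scale * &nll_id;
/// ```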
2806#[derive(Clone)]
2808pub struct LikelihoodScalar(String);
2809
2810impl LikelihoodScalar {
2811 pub fn new<T: AsRef<str>>(name: T) -> Box<Self> {
2813 Self(name.as_ref().into()).into()
2814 }
2815}
2816
2817impl LikelihoodTerm for LikelihoodScalar {
2818 fn evaluate(&self, parameters: &[Float]) -> Float {
2819 parameters[0]
2820 }
2821
2822 fn evaluate_gradient(&self, _parameters: &[Float]) -> DVector<Float> {
2823 DVector::from_vec(vec![1.0])
2824 }
2825
2826 fn parameters(&self) -> Vec<String> {
2827 vec![self.0.clone()]
2828 }
2829}
2830
2831#[cfg(feature = "python")]
2843#[pyfunction(name = "LikelihoodScalar")]
2844pub fn py_likelihood_scalar(name: String) -> PyLikelihoodTerm {
2845 PyLikelihoodTerm(LikelihoodScalar::new(name))
2846}