use std::{
    collections::HashMap,
    fmt::{Debug, Display},
    sync::Arc,
};

use accurate::{sum::Klein, traits::*};
use auto_ops::*;
use dyn_clone::DynClone;
use fastrand::Rng;
use ganesh::{
    mcmc::{ESSMove, Ensemble},
    Function, Minimizer, Sampler, Status,
};
use laddu_core::{
    amplitudes::{central_difference, AmplitudeValues, Evaluator, GradientValues, Model},
    data::Dataset,
    resources::Parameters,
    Complex, DVector, Float, LadduError,
};

#[cfg(feature = "mpi")]
use laddu_core::mpi::LadduMPI;

#[cfg(feature = "mpi")]
use mpi::{datatype::PartitionMut, topology::SimpleCommunicator, traits::*};

#[cfg(feature = "python")]
use crate::ganesh_ext::py_ganesh::{
    py_parse_mcmc_options, py_parse_minimizer_options, PyEnsemble, PyStatus,
};
#[cfg(feature = "python")]
use laddu_python::{
    amplitudes::{PyEvaluator, PyModel},
    data::PyDataset,
};
#[cfg(feature = "python")]
use numpy::PyArray1;
#[cfg(feature = "python")]
use pyo3::{
    exceptions::PyTypeError,
    prelude::*,
    types::{PyDict, PyList},
};
#[cfg(feature = "rayon")]
use rayon::{prelude::*, ThreadPool, ThreadPoolBuilder};

use crate::ganesh_ext::{MCMCOptions, MinimizerOptions};

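/// A term in a combined likelihood. Implementors provide the list of free parameter names
/// and the value of the term; [`LikelihoodTerm::evaluate_gradient`] falls back to a central
/// finite difference of [`LikelihoodTerm::evaluate`] unless overridden.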
pub trait LikelihoodTerm: DynClone + Send + Sync {
    fn evaluate(&self, parameters: &[Float]) -> Float;
    fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
        central_difference(parameters, |parameters: &[Float]| self.evaluate(parameters))
    }
    fn parameters(&self) -> Vec<String>;
}

dyn_clone::clone_trait_object!(LikelihoodTerm);

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodTerm", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodTerm(pub Box<dyn LikelihoodTerm>);

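/// An extended, unbinned negative log-likelihood term built from a [`Model`], a data
/// [`Dataset`], and an accepted Monte Carlo [`Dataset`] used to estimate the normalization
/// integral. Evaluating it yields
/// `-2 * (Σ_data w·ln(Re L) - (1/n_mc) Σ_accmc w·Re L)`,
/// where `n_mc` is the number of accepted Monte Carlo events.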
#[derive(Clone)]
pub struct NLL {
    pub data_evaluator: Evaluator,
    pub accmc_evaluator: Evaluator,
}

impl NLL {
    pub fn new(model: &Model, ds_data: &Arc<Dataset>, ds_accmc: &Arc<Dataset>) -> Box<Self> {
        Self {
            data_evaluator: model.load(ds_data),
            accmc_evaluator: model.load(ds_accmc),
        }
        .into()
    }
    pub fn activate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.activate(&name)?;
        self.accmc_evaluator.activate(&name)
    }
    pub fn activate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.activate_many(names)?;
        self.accmc_evaluator.activate_many(names)
    }
    pub fn activate_all(&self) {
        self.data_evaluator.activate_all();
        self.accmc_evaluator.activate_all();
    }
    pub fn deactivate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.deactivate(&name)?;
        self.accmc_evaluator.deactivate(&name)
    }
    pub fn deactivate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.deactivate_many(names)?;
        self.accmc_evaluator.deactivate_many(names)
    }
    pub fn deactivate_all(&self) {
        self.data_evaluator.deactivate_all();
        self.accmc_evaluator.deactivate_all();
    }
    pub fn isolate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.isolate(&name)?;
        self.accmc_evaluator.isolate(&name)
    }
    pub fn isolate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.isolate_many(names)?;
        self.accmc_evaluator.isolate_many(names)
    }

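    /// Project the model onto a Monte Carlo dataset at the given parameters, returning one
    /// weight per event, `event.weight * Re L(event) / n_mc`. The accepted Monte Carlo
    /// evaluator is used unless `mc_evaluator` is supplied. This is the single-process path.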
    pub fn project_local(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> Vec<Float> {
        let (mc_dataset, result) = if let Some(mc_evaluator) = mc_evaluator {
            (
                mc_evaluator.dataset.clone(),
                mc_evaluator.evaluate_local(parameters),
            )
        } else {
            (
                self.accmc_evaluator.dataset.clone(),
                self.accmc_evaluator.evaluate_local(parameters),
            )
        };
        let n_mc = self.accmc_evaluator.dataset.n_events();
        #[cfg(feature = "rayon")]
        let output: Vec<Float> = result
            .par_iter()
            .zip(mc_dataset.events.par_iter())
            .map(|(l, e)| e.weight * l.re / n_mc as Float)
            .collect();

        #[cfg(not(feature = "rayon"))]
        let output: Vec<Float> = result
            .iter()
            .zip(mc_dataset.events.iter())
            .map(|(l, e)| e.weight * l.re / n_mc as Float)
            .collect();
        output
    }

    #[cfg(feature = "mpi")]
    pub fn project_mpi(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Vec<Float> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let local_projection = self.project_local(parameters, mc_evaluator);
        let mut buffer: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }
        buffer
    }

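    /// Project the model onto a Monte Carlo dataset, dispatching to the MPI implementation
    /// when an MPI world is available and to the local implementation otherwise.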
    pub fn project(&self, parameters: &[Float], mc_evaluator: Option<Evaluator>) -> Vec<Float> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_mpi(parameters, mc_evaluator, &world);
            }
        }
        self.project_local(parameters, mc_evaluator)
    }

    pub fn project_gradient_local(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        let (mc_dataset, result, result_gradient) = if let Some(mc_evaluator) = mc_evaluator {
            (
                mc_evaluator.dataset.clone(),
                mc_evaluator.evaluate_local(parameters),
                mc_evaluator.evaluate_gradient_local(parameters),
            )
        } else {
            (
                self.accmc_evaluator.dataset.clone(),
                self.accmc_evaluator.evaluate_local(parameters),
                self.accmc_evaluator.evaluate_gradient_local(parameters),
            )
        };
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        {
            (
                result
                    .par_iter()
                    .zip(mc_dataset.events.par_iter())
                    .map(|(l, e)| e.weight * l.re / n_mc)
                    .collect(),
                result_gradient
                    .par_iter()
                    .zip(mc_dataset.events.par_iter())
                    .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                    .collect(),
            )
        }
        #[cfg(not(feature = "rayon"))]
        {
            (
                result
                    .iter()
                    .zip(mc_dataset.events.iter())
                    .map(|(l, e)| e.weight * l.re / n_mc)
                    .collect(),
                result_gradient
                    .iter()
                    .zip(mc_dataset.events.iter())
                    .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                    .collect(),
            )
        }
    }

    #[cfg(feature = "mpi")]
    pub fn project_gradient_mpi(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let (local_projection, local_gradient_projection) =
            self.project_gradient_local(parameters, mc_evaluator);
        let mut projection_result: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }

        let flattened_local_gradient_projection = local_gradient_projection
            .iter()
            .flat_map(|g| g.data.as_vec().to_vec())
            .collect::<Vec<Float>>();
        let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
        let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
        let mut partitioned_flattened_result_buffer =
            PartitionMut::new(&mut flattened_result_buffer, counts, displs);
        world.all_gather_varcount_into(
            &flattened_local_gradient_projection,
            &mut partitioned_flattened_result_buffer,
        );
        let gradient_projection_result = flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .collect();
        (projection_result, gradient_projection_result)
    }
    pub fn project_gradient(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_gradient_mpi(parameters, mc_evaluator, &world);
            }
        }
        self.project_gradient_local(parameters, mc_evaluator)
    }

    pub fn project_with_local<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<Vec<Float>, LadduError> {
        if let Some(mc_evaluator) = &mc_evaluator {
            let current_active_mc = mc_evaluator.resources.read().active.clone();
            mc_evaluator.isolate_many(names)?;
            let mc_dataset = mc_evaluator.dataset.clone();
            let result = mc_evaluator.evaluate_local(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events();
            #[cfg(feature = "rayon")]
            let output: Vec<Float> = result
                .par_iter()
                .zip(mc_dataset.events.par_iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            #[cfg(not(feature = "rayon"))]
            let output: Vec<Float> = result
                .iter()
                .zip(mc_dataset.events.iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            mc_evaluator.resources.write().active = current_active_mc;
            Ok(output)
        } else {
            let current_active_data = self.data_evaluator.resources.read().active.clone();
            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
            self.isolate_many(names)?;
            let mc_dataset = &self.accmc_evaluator.dataset;
            let result = self.accmc_evaluator.evaluate_local(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events();
            #[cfg(feature = "rayon")]
            let output: Vec<Float> = result
                .par_iter()
                .zip(mc_dataset.events.par_iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            #[cfg(not(feature = "rayon"))]
            let output: Vec<Float> = result
                .iter()
                .zip(mc_dataset.events.iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            self.data_evaluator.resources.write().active = current_active_data;
            self.accmc_evaluator.resources.write().active = current_active_accmc;
            Ok(output)
        }
    }

    #[cfg(feature = "mpi")]
    pub fn project_with_mpi<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Result<Vec<Float>, LadduError> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let local_projection = self.project_with_local(parameters, names, mc_evaluator)?;
        let mut buffer: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }
        Ok(buffer)
    }

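    /// Like [`NLL::project`], but temporarily isolates the named amplitudes before
    /// projecting and restores the previous activation state afterwards. Dispatches to the
    /// MPI implementation when an MPI world is available.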
    pub fn project_with<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<Vec<Float>, LadduError> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_with_mpi(parameters, names, mc_evaluator, &world);
            }
        }
        self.project_with_local(parameters, names, mc_evaluator)
    }

    pub fn project_gradient_with_local<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        if let Some(mc_evaluator) = &mc_evaluator {
            let current_active_mc = mc_evaluator.resources.read().active.clone();
            mc_evaluator.isolate_many(names)?;
            let mc_dataset = mc_evaluator.dataset.clone();
            let result = mc_evaluator.evaluate_local(parameters);
            let result_gradient = mc_evaluator.evaluate_gradient(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
            #[cfg(feature = "rayon")]
            let (res, res_gradient) = {
                (
                    result
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            #[cfg(not(feature = "rayon"))]
            let (res, res_gradient) = {
                (
                    result
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            mc_evaluator.resources.write().active = current_active_mc;
            Ok((res, res_gradient))
        } else {
            let current_active_data = self.data_evaluator.resources.read().active.clone();
            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
            self.isolate_many(names)?;
            let mc_dataset = &self.accmc_evaluator.dataset;
            let result = self.accmc_evaluator.evaluate_local(parameters);
            let result_gradient = self.accmc_evaluator.evaluate_gradient(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
            #[cfg(feature = "rayon")]
            let (res, res_gradient) = {
                (
                    result
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            #[cfg(not(feature = "rayon"))]
            let (res, res_gradient) = {
                (
                    result
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            self.data_evaluator.resources.write().active = current_active_data;
            self.accmc_evaluator.resources.write().active = current_active_accmc;
            Ok((res, res_gradient))
        }
    }

    #[cfg(feature = "mpi")]
    pub fn project_gradient_with_mpi<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let (local_projection, local_gradient_projection) =
            self.project_gradient_with_local(parameters, names, mc_evaluator)?;
        let mut projection_result: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }

        let flattened_local_gradient_projection = local_gradient_projection
            .iter()
            .flat_map(|g| g.data.as_vec().to_vec())
            .collect::<Vec<Float>>();
        let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
        let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
        let mut partitioned_flattened_result_buffer =
            PartitionMut::new(&mut flattened_result_buffer, counts, displs);
        world.all_gather_varcount_into(
            &flattened_local_gradient_projection,
            &mut partitioned_flattened_result_buffer,
        );
        let gradient_projection_result = flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .collect();
        Ok((projection_result, gradient_projection_result))
    }
    pub fn project_gradient_with<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_gradient_with_mpi(parameters, names, mc_evaluator, &world);
            }
        }
        self.project_gradient_with_local(parameters, names, mc_evaluator)
    }

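    /// Evaluate the extended negative log-likelihood over this process's events,
    /// `-2 * (Σ_data w·ln(Re L) - (1/n_mc) Σ_accmc w·Re L)`, using Klein (compensated)
    /// summation to reduce floating-point error in the sums.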
    fn evaluate_local(&self, parameters: &[Float]) -> Float {
        let data_result = self.data_evaluator.evaluate_local(parameters);
        let mc_result = self.accmc_evaluator.evaluate_local(parameters);
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        let data_term: Float = data_result
            .par_iter()
            .zip(self.data_evaluator.dataset.events.par_iter())
            .map(|(l, e)| e.weight * Float::ln(l.re))
            .parallel_sum_with_accumulator::<Klein<Float>>();
        #[cfg(feature = "rayon")]
        let mc_term: Float = mc_result
            .par_iter()
            .zip(self.accmc_evaluator.dataset.events.par_iter())
            .map(|(l, e)| e.weight * l.re)
            .parallel_sum_with_accumulator::<Klein<Float>>();
        #[cfg(not(feature = "rayon"))]
        let data_term: Float = data_result
            .iter()
            .zip(self.data_evaluator.dataset.events.iter())
            .map(|(l, e)| e.weight * Float::ln(l.re))
            .sum_with_accumulator::<Klein<Float>>();
        #[cfg(not(feature = "rayon"))]
        let mc_term: Float = mc_result
            .iter()
            .zip(self.accmc_evaluator.dataset.events.iter())
            .map(|(l, e)| e.weight * l.re)
            .sum_with_accumulator::<Klein<Float>>();
        -2.0 * (data_term - mc_term / n_mc)
    }

    #[cfg(feature = "mpi")]
    fn evaluate_mpi(&self, parameters: &[Float], world: &SimpleCommunicator) -> Float {
        let local_evaluation = self.evaluate_local(parameters);
        let mut buffer: Vec<Float> = vec![0.0; world.size() as usize];
        world.all_gather_into(&local_evaluation, &mut buffer);
        buffer.iter().sum()
    }

    fn evaluate_gradient_local(&self, parameters: &[Float]) -> DVector<Float> {
        let data_resources = self.data_evaluator.resources.read();
        let data_parameters = Parameters::new(parameters, &data_resources.constants);
        let mc_resources = self.accmc_evaluator.resources.read();
        let mc_parameters = Parameters::new(parameters, &mc_resources.constants);
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        let data_term: DVector<Float> = self
            .data_evaluator
            .dataset
            .events
            .par_iter()
            .zip(data_resources.caches.par_iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
                self.data_evaluator
                    .amplitudes
                    .iter()
                    .zip(data_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&data_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.data_evaluator
                            .amplitudes
                            .iter()
                            .zip(data_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&data_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(parameters.len(), gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.data_evaluator.expression.evaluate(&amp_vals),
                    self.data_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
            .collect::<Vec<DVector<Float>>>()
            .iter()
            .sum();
        #[cfg(feature = "rayon")]
        let mc_term: DVector<Float> = self
            .accmc_evaluator
            .dataset
            .events
            .par_iter()
            .zip(mc_resources.caches.par_iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
                self.accmc_evaluator
                    .amplitudes
                    .iter()
                    .zip(mc_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&mc_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.accmc_evaluator
                            .amplitudes
                            .iter()
                            .zip(mc_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&mc_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(parameters.len(), gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.accmc_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, g)| w * g.map(|gi| gi.re))
            .collect::<Vec<DVector<Float>>>()
            .iter()
            .sum();
        #[cfg(not(feature = "rayon"))]
        let data_term: DVector<Float> = self
            .data_evaluator
            .dataset
            .events
            .iter()
            .zip(data_resources.caches.iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
                self.data_evaluator
                    .amplitudes
                    .iter()
                    .zip(data_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&data_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.data_evaluator
                            .amplitudes
                            .iter()
                            .zip(data_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&data_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(parameters.len(), gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.data_evaluator.expression.evaluate(&amp_vals),
                    self.data_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
            .sum();
        #[cfg(not(feature = "rayon"))]
        let mc_term: DVector<Float> = self
            .accmc_evaluator
            .dataset
            .events
            .iter()
            .zip(mc_resources.caches.iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
                self.accmc_evaluator
                    .amplitudes
                    .iter()
                    .zip(mc_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&mc_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.accmc_evaluator
                            .amplitudes
                            .iter()
                            .zip(mc_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&mc_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(parameters.len(), gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.accmc_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, g)| w * g.map(|gi| gi.re))
            .sum();
        -2.0 * (data_term - mc_term / n_mc)
    }

    #[cfg(feature = "mpi")]
    fn evaluate_gradient_mpi(
        &self,
        parameters: &[Float],
        world: &SimpleCommunicator,
    ) -> DVector<Float> {
        let local_evaluation_vec = self
            .evaluate_gradient_local(parameters)
            .data
            .as_vec()
            .to_vec();
        let mut flattened_result_buffer = vec![0.0; world.size() as usize * parameters.len()];
        world.all_gather_into(&local_evaluation_vec, &mut flattened_result_buffer);
        flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .sum::<DVector<Float>>()
    }
}

impl LikelihoodTerm for NLL {
    fn parameters(&self) -> Vec<String> {
        self.data_evaluator
            .resources
            .read()
            .parameters
            .iter()
            .cloned()
            .collect()
    }

    fn evaluate(&self, parameters: &[Float]) -> Float {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.evaluate_mpi(parameters, &world);
            }
        }
        self.evaluate_local(parameters)
    }

    fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.evaluate_gradient_mpi(parameters, &world);
            }
        }
        self.evaluate_gradient_local(parameters)
    }
}

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for NLL {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Ok(thread_pool.install(|| LikelihoodTerm::evaluate(self, parameters)))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Ok(thread_pool.install(|| LikelihoodTerm::evaluate_gradient(self, parameters)))
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for NLL {
    fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
        Ok(LikelihoodTerm::evaluate(self, parameters))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        _user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Ok(LikelihoodTerm::evaluate_gradient(self, parameters))
    }
}

pub(crate) struct LogLikelihood<'a>(&'a NLL);

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for LogLikelihood<'_> {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for LogLikelihood<'_> {
    fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, user_data).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, user_data).map(|res| -res)
    }
}

impl NLL {
    pub fn minimize(
        &self,
        p0: &[Float],
        bounds: Option<Vec<(Float, Float)>>,
        options: Option<MinimizerOptions>,
    ) -> Result<Status, LadduError> {
        let options = options.unwrap_or_default();
        let mut m = Minimizer::new(options.algorithm, self.parameters().len())
            .with_bounds(bounds)
            .with_max_steps(options.max_steps);
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        #[cfg(feature = "rayon")]
        {
            m.minimize(
                self,
                p0,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.minimize(self, p0, &mut ())?;
        }
        Ok(m.status)
    }
    pub fn mcmc<T: AsRef<[DVector<Float>]>>(
        &self,
        p0: T,
        n_steps: usize,
        options: Option<MCMCOptions>,
        rng: Rng,
    ) -> Result<Ensemble, LadduError> {
        let options = options.unwrap_or(MCMCOptions::new_ess(
            [ESSMove::differential(0.9), ESSMove::gaussian(0.1)],
            rng,
        ));
        let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        let func = LogLikelihood(self);
        #[cfg(feature = "rayon")]
        {
            m.sample(
                &func,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                n_steps,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.sample(&func, &mut (), n_steps)?;
        }
        Ok(m.ensemble)
    }
}

#[cfg(feature = "python")]
#[pyclass(name = "NLL", module = "laddu")]
#[derive(Clone)]
pub struct PyNLL(pub Box<NLL>);

#[cfg(feature = "python")]
#[pymethods]
impl PyNLL {
    #[new]
    #[pyo3(signature = (model, ds_data, ds_accmc))]
    fn new(model: &PyModel, ds_data: &PyDataset, ds_accmc: &PyDataset) -> Self {
        Self(NLL::new(&model.0, &ds_data.0, &ds_accmc.0))
    }
    #[getter]
    fn data(&self) -> PyDataset {
        PyDataset(self.0.data_evaluator.dataset.clone())
    }
    #[getter]
    fn accmc(&self) -> PyDataset {
        PyDataset(self.0.accmc_evaluator.dataset.clone())
    }
    fn as_term(&self) -> PyLikelihoodTerm {
        PyLikelihoodTerm(self.0.clone())
    }
    #[getter]
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    fn activate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.activate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.activate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    fn activate_all(&self) {
        self.0.activate_all();
    }
    fn deactivate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.deactivate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.deactivate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    fn deactivate_all(&self) {
        self.0.deactivate_all();
    }
    fn isolate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.isolate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.isolate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
        #[cfg(feature = "rayon")]
        {
            Ok(ThreadPoolBuilder::new()
                .num_threads(threads.unwrap_or_else(num_cpus::get))
                .build()
                .map_err(LadduError::from)?
                .install(|| LikelihoodTerm::evaluate(self.0.as_ref(), &parameters)))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(LikelihoodTerm::evaluate(self.0.as_ref(), &parameters))
        }
    }
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate_gradient<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| self.0.evaluate_gradient(&parameters))
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0.evaluate_gradient(&parameters).as_slice(),
            ))
        }
    }
    #[pyo3(signature = (parameters, *, mc_evaluator = None, threads=None))]
    fn project<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        mc_evaluator: Option<PyEvaluator>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| {
                        self.0
                            .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
                    })
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0
                    .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
                    .as_slice(),
            ))
        }
    }

    #[pyo3(signature = (parameters, arg, *, mc_evaluator = None, threads=None))]
    fn project_with<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        arg: &Bound<'_, PyAny>,
        mc_evaluator: Option<PyEvaluator>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        let names = if let Ok(string_arg) = arg.extract::<String>() {
            vec![string_arg]
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            vec
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        };
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| {
                        self.0.project_with(
                            &parameters,
                            &names,
                            mc_evaluator.map(|pyeval| pyeval.0.clone()),
                        )
                    })?
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0
                    .project_with(
                        &parameters,
                        &names,
                        mc_evaluator.map(|pyeval| pyeval.0.clone()),
                    )?
                    .as_slice(),
            ))
        }
    }

    #[pyo3(signature = (p0, *, bounds=None, method="lbfgsb", max_steps=4000, debug=false, verbose=false, **kwargs))]
    #[allow(clippy::too_many_arguments)]
    fn minimize(
        &self,
        p0: Vec<Float>,
        bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
        method: &str,
        max_steps: usize,
        debug: bool,
        verbose: bool,
        kwargs: Option<&Bound<'_, PyDict>>,
    ) -> PyResult<PyStatus> {
        let bounds = bounds.map(|bounds_vec| {
            bounds_vec
                .iter()
                .map(|(opt_lb, opt_ub)| {
                    (
                        opt_lb.unwrap_or(Float::NEG_INFINITY),
                        opt_ub.unwrap_or(Float::INFINITY),
                    )
                })
                .collect()
        });
        let n_parameters = p0.len();
        let options =
            py_parse_minimizer_options(n_parameters, method, max_steps, debug, verbose, kwargs)?;
        let status = self.0.minimize(&p0, bounds, Some(options))?;
        Ok(PyStatus(status))
    }
    #[pyo3(signature = (p0, n_steps, *, method="ESS", debug=false, verbose=false, seed=0, **kwargs))]
    #[allow(clippy::too_many_arguments)]
    fn mcmc(
        &self,
        p0: Vec<Vec<Float>>,
        n_steps: usize,
        method: &str,
        debug: bool,
        verbose: bool,
        seed: u64,
        kwargs: Option<&Bound<'_, PyDict>>,
    ) -> PyResult<PyEnsemble> {
        let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
        let mut rng = Rng::new();
        rng.seed(seed);
        let options = py_parse_mcmc_options(method, debug, verbose, kwargs, rng.clone())?;
        let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
        Ok(PyEnsemble(ensemble))
    }
}

#[derive(Clone, Debug)]
pub struct LikelihoodID(usize, Option<String>);

impl Display for LikelihoodID {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if let Some(name) = &self.1 {
            write!(f, "{}({})", name, self.0)
        } else {
            write!(f, "({})", self.0)
        }
    }
}

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodID", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodID(LikelihoodID);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodID {
    fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() + other_expr.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                    self.0.clone(),
                )))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                other_expr.0.clone() + self.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                    self.0.clone(),
                )))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() * other_expr.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(other_aid.0.clone() * self.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                other_expr.0.clone() * self.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __str__(&self) -> String {
        format!("{}", self.0)
    }
    fn __repr__(&self) -> String {
        format!("{:?}", self.0)
    }
}

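/// A manager which combines several [`LikelihoodTerm`]s over a single shared parameter
/// space. Parameters are deduplicated by name, and each term records a layout mapping the
/// global parameter vector into its own local buffer.
///
/// A minimal usage sketch (hedged: the models and datasets are assumed to be built
/// elsewhere, and the variable names here are illustrative only):
///
/// ```ignore
/// let mut manager = LikelihoodManager::default();
/// // Register two NLL terms; parameters with identical names are shared between them.
/// let nll_a = manager.register(NLL::new(&model_a, &ds_data, &ds_accmc));
/// let nll_b = manager.register_with_name(NLL::new(&model_b, &ds_data, &ds_accmc), "B");
/// // Combine registered terms with `+`/`*` into a LikelihoodExpression and load it.
/// let evaluator = manager.load(&(nll_a + nll_b));
/// let value = evaluator.evaluate(&vec![0.0; evaluator.parameters().len()])?;
/// ```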
#[derive(Default, Clone)]
pub struct LikelihoodManager {
    terms: Vec<Box<dyn LikelihoodTerm>>,
    param_name_to_index: HashMap<String, usize>,
    param_names: Vec<String>,
    param_layouts: Vec<Vec<usize>>,
    param_counts: Vec<usize>,
}

impl LikelihoodManager {
    pub fn register(&mut self, term: Box<dyn LikelihoodTerm>) -> LikelihoodID {
        let term_idx = self.terms.len();
        for param_name in term.parameters() {
            if !self.param_name_to_index.contains_key(&param_name) {
                self.param_name_to_index
                    .insert(param_name.clone(), self.param_name_to_index.len());
                self.param_names.push(param_name.clone());
            }
        }
        let param_layout: Vec<usize> = term
            .parameters()
            .iter()
            .map(|name| self.param_name_to_index[name])
            .collect();
        let param_count = term.parameters().len();
        self.param_layouts.push(param_layout);
        self.param_counts.push(param_count);
        self.terms.push(term.clone());

        LikelihoodID(term_idx, None)
    }

    pub fn register_with_name<T: AsRef<str>>(
        &mut self,
        term: Box<dyn LikelihoodTerm>,
        name: T,
    ) -> LikelihoodID {
        let term_idx = self.terms.len();
        for param_name in term.parameters() {
            if !self.param_name_to_index.contains_key(&param_name) {
                self.param_name_to_index
                    .insert(param_name.clone(), self.param_name_to_index.len());
                self.param_names.push(param_name.clone());
            }
        }
        let param_layout: Vec<usize> = term
            .parameters()
            .iter()
            .map(|name| self.param_name_to_index[name])
            .collect();
        let param_count = term.parameters().len();
        self.param_layouts.push(param_layout);
        self.param_counts.push(param_count);
        self.terms.push(term.clone());

        LikelihoodID(term_idx, Some(name.as_ref().to_string()))
    }

    pub fn parameters(&self) -> Vec<String> {
        self.param_names.clone()
    }

    pub fn load(&self, likelihood_expression: &LikelihoodExpression) -> LikelihoodEvaluator {
        LikelihoodEvaluator {
            likelihood_manager: self.clone(),
            likelihood_expression: likelihood_expression.clone(),
        }
    }
}

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodManager", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodManager(LikelihoodManager);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodManager {
    #[new]
    fn new() -> Self {
        Self(LikelihoodManager::default())
    }
    #[pyo3(signature = (likelihood_term, *, name = None))]
    fn register(
        &mut self,
        likelihood_term: &PyLikelihoodTerm,
        name: Option<String>,
    ) -> PyLikelihoodID {
        if let Some(name) = name {
            PyLikelihoodID(self.0.register_with_name(likelihood_term.0.clone(), name))
        } else {
            PyLikelihoodID(self.0.register(likelihood_term.0.clone()))
        }
    }
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    fn load(&self, likelihood_expression: &PyLikelihoodExpression) -> PyLikelihoodEvaluator {
        PyLikelihoodEvaluator(self.0.load(&likelihood_expression.0))
    }
}

#[derive(Debug)]
struct LikelihoodValues(Vec<Float>);

#[derive(Debug)]
struct LikelihoodGradients(Vec<DVector<Float>>);

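/// An expression tree over [`LikelihoodID`]s supporting sums and products of likelihood
/// terms, with `Zero` and `One` as the additive and multiplicative identities; gradients
/// of products are propagated with the product rule.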
#[derive(Clone, Default)]
pub enum LikelihoodExpression {
    #[default]
    Zero,
    One,
    Term(LikelihoodID),
    Add(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
    Mul(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
}

impl Debug for LikelihoodExpression {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.write_tree(f, "", "", "")
    }
}

impl Display for LikelihoodExpression {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl LikelihoodExpression {
    fn evaluate(&self, likelihood_values: &LikelihoodValues) -> Float {
        match self {
            LikelihoodExpression::Zero => 0.0,
            LikelihoodExpression::One => 1.0,
            LikelihoodExpression::Term(lid) => likelihood_values.0[lid.0],
            LikelihoodExpression::Add(a, b) => {
                a.evaluate(likelihood_values) + b.evaluate(likelihood_values)
            }
            LikelihoodExpression::Mul(a, b) => {
                a.evaluate(likelihood_values) * b.evaluate(likelihood_values)
            }
        }
    }
    fn evaluate_gradient(
        &self,
        likelihood_values: &LikelihoodValues,
        likelihood_gradients: &LikelihoodGradients,
    ) -> DVector<Float> {
        match self {
            LikelihoodExpression::Zero => DVector::zeros(0),
            LikelihoodExpression::One => DVector::zeros(0),
            LikelihoodExpression::Term(lid) => likelihood_gradients.0[lid.0].clone(),
            LikelihoodExpression::Add(a, b) => {
                a.evaluate_gradient(likelihood_values, likelihood_gradients)
                    + b.evaluate_gradient(likelihood_values, likelihood_gradients)
            }
            LikelihoodExpression::Mul(a, b) => {
                a.evaluate_gradient(likelihood_values, likelihood_gradients)
                    * b.evaluate(likelihood_values)
                    + b.evaluate_gradient(likelihood_values, likelihood_gradients)
                        * a.evaluate(likelihood_values)
            }
        }
    }
    fn write_tree(
        &self,
        f: &mut std::fmt::Formatter<'_>,
        parent_prefix: &str,
        immediate_prefix: &str,
        parent_suffix: &str,
    ) -> std::fmt::Result {
        let display_string = match self {
            Self::Zero => "0".to_string(),
            Self::One => "1".to_string(),
            Self::Term(lid) => {
                format!("{}", lid.0)
            }
            Self::Add(_, _) => "+".to_string(),
            Self::Mul(_, _) => "*".to_string(),
        };
        writeln!(f, "{}{}{}", parent_prefix, immediate_prefix, display_string)?;
        match self {
            Self::Term(_) | Self::Zero | Self::One => {}
            Self::Add(a, b) | Self::Mul(a, b) => {
                let terms = [a, b];
                let mut it = terms.iter().peekable();
                let child_prefix = format!("{}{}", parent_prefix, parent_suffix);
                while let Some(child) = it.next() {
                    match it.peek() {
                        Some(_) => child.write_tree(f, &child_prefix, "├─ ", "│  "),
                        None => child.write_tree(f, &child_prefix, "└─ ", "   "),
                    }?;
                }
            }
        }
        Ok(())
    }
}

impl_op_ex!(+ |a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(a.clone()), Box::new(b.clone()))});
impl_op_ex!(
    *|a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression {
        LikelihoodExpression::Mul(Box::new(a.clone()), Box::new(b.clone()))
    }
);
impl_op_ex_commutative!(+ |a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(b.clone()))});
impl_op_ex_commutative!(
    *|a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression {
        LikelihoodExpression::Mul(
            Box::new(LikelihoodExpression::Term(a.clone())),
            Box::new(b.clone()),
        )
    }
);
impl_op_ex!(+ |a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(LikelihoodExpression::Term(b.clone())))});
impl_op_ex!(
    *|a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression {
        LikelihoodExpression::Mul(
            Box::new(LikelihoodExpression::Term(a.clone())),
            Box::new(LikelihoodExpression::Term(b.clone())),
        )
    }
);

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodExpression", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodExpression(LikelihoodExpression);

#[cfg(feature = "python")]
#[pyfunction(name = "likelihood_sum")]
pub fn py_likelihood_sum(terms: Vec<Bound<'_, PyAny>>) -> PyResult<PyLikelihoodExpression> {
    if terms.is_empty() {
        return Ok(PyLikelihoodExpression(LikelihoodExpression::Zero));
    }
    if terms.len() == 1 {
        let term = &terms[0];
        if let Ok(expression) = term.extract::<PyLikelihoodExpression>() {
            return Ok(expression);
        }
        if let Ok(py_likelihood_id) = term.extract::<PyLikelihoodID>() {
            return Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                py_likelihood_id.0,
            )));
        }
        return Err(PyTypeError::new_err(
            "Item is neither a PyLikelihoodExpression nor a PyLikelihoodID",
        ));
    }
    let mut iter = terms.iter();
    let Some(first_term) = iter.next() else {
        return Ok(PyLikelihoodExpression(LikelihoodExpression::Zero));
    };
    if let Ok(first_expression) = first_term.extract::<PyLikelihoodExpression>() {
        let mut summation = first_expression.clone();
        for term in iter {
            summation = summation.__add__(term)?;
        }
        return Ok(summation);
    }
    if let Ok(first_likelihood_id) = first_term.extract::<PyLikelihoodID>() {
        let mut summation =
            PyLikelihoodExpression(LikelihoodExpression::Term(first_likelihood_id.0));
        for term in iter {
            summation = summation.__add__(term)?;
        }
        return Ok(summation);
    }
    Err(PyTypeError::new_err(
        "Elements must be PyLikelihoodExpression or PyLikelihoodID",
    ))
}

#[cfg(feature = "python")]
#[pyfunction(name = "likelihood_product")]
pub fn py_likelihood_product(terms: Vec<Bound<'_, PyAny>>) -> PyResult<PyLikelihoodExpression> {
    if terms.is_empty() {
        return Ok(PyLikelihoodExpression(LikelihoodExpression::One));
    }
    if terms.len() == 1 {
        let term = &terms[0];
        if let Ok(expression) = term.extract::<PyLikelihoodExpression>() {
            return Ok(expression);
        }
        if let Ok(py_likelihood_id) = term.extract::<PyLikelihoodID>() {
            return Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                py_likelihood_id.0,
            )));
        }
        return Err(PyTypeError::new_err(
            "Item is neither a PyLikelihoodExpression nor a PyLikelihoodID",
        ));
    }
    let mut iter = terms.iter();
    let Some(first_term) = iter.next() else {
        return Ok(PyLikelihoodExpression(LikelihoodExpression::One));
    };
    if let Ok(first_expression) = first_term.extract::<PyLikelihoodExpression>() {
        let mut product = first_expression.clone();
        for term in iter {
            product = product.__mul__(term)?;
        }
        return Ok(product);
    }
    if let Ok(first_likelihood_id) = first_term.extract::<PyLikelihoodID>() {
        let mut product = PyLikelihoodExpression(LikelihoodExpression::Term(first_likelihood_id.0));
        for term in iter {
            product = product.__mul__(term)?;
        }
        return Ok(product);
    }
    Err(PyTypeError::new_err(
        "Elements must be PyLikelihoodExpression or PyLikelihoodID",
    ))
}

#[cfg(feature = "python")]
#[pyfunction(name = "LikelihoodZero")]
pub fn py_likelihood_zero() -> PyLikelihoodExpression {
    PyLikelihoodExpression(LikelihoodExpression::Zero)
}

#[cfg(feature = "python")]
#[pyfunction(name = "LikelihoodOne")]
pub fn py_likelihood_one() -> PyLikelihoodExpression {
    PyLikelihoodExpression(LikelihoodExpression::One)
}

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodExpression {
    fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() + other_expr.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(self.0.clone()))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                other_expr.0.clone() + self.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(self.0.clone()))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() * other_expr.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() * other_expr.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __str__(&self) -> String {
        format!("{}", self.0)
    }
    fn __repr__(&self) -> String {
        format!("{:?}", self.0)
    }
}

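/// Evaluates a [`LikelihoodExpression`] over the terms registered in a
/// [`LikelihoodManager`], scattering the global parameter vector into per-term buffers
/// before each evaluation.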
pub struct LikelihoodEvaluator {
    likelihood_manager: LikelihoodManager,
    likelihood_expression: LikelihoodExpression,
}

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for LikelihoodEvaluator {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        thread_pool.install(|| self.evaluate(parameters))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        thread_pool.install(|| self.evaluate_gradient(parameters))
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for LikelihoodEvaluator {
    fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
        self.evaluate(parameters)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        _user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        self.evaluate_gradient(parameters)
    }
}

pub(crate) struct NegativeLikelihoodEvaluator<'a>(&'a LikelihoodEvaluator);
#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for NegativeLikelihoodEvaluator<'_> {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for NegativeLikelihoodEvaluator<'_> {
    fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, user_data).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, user_data).map(|res| -res)
    }
}

impl LikelihoodEvaluator {
    pub fn parameters(&self) -> Vec<String> {
        self.likelihood_manager.parameters()
    }
    pub fn evaluate(&self, parameters: &[Float]) -> Result<Float, LadduError> {
        let mut param_buffers: Vec<Vec<Float>> = self
            .likelihood_manager
            .param_counts
            .iter()
            .map(|&count| vec![0.0; count])
            .collect();
        for (layout, buffer) in self
            .likelihood_manager
            .param_layouts
            .iter()
            .zip(param_buffers.iter_mut())
        {
            for (buffer_idx, &param_idx) in layout.iter().enumerate() {
                buffer[buffer_idx] = parameters[param_idx];
            }
        }
        let likelihood_values = LikelihoodValues(
            self.likelihood_manager
                .terms
                .iter()
                .zip(param_buffers.iter())
                .map(|(term, buffer)| term.evaluate(buffer))
                .collect(),
        );
        Ok(self.likelihood_expression.evaluate(&likelihood_values))
    }

    pub fn evaluate_gradient(&self, parameters: &[Float]) -> Result<DVector<Float>, LadduError> {
        let mut param_buffers: Vec<Vec<Float>> = self
            .likelihood_manager
            .param_counts
            .iter()
            .map(|&count| vec![0.0; count])
            .collect();
        for (layout, buffer) in self
            .likelihood_manager
            .param_layouts
            .iter()
            .zip(param_buffers.iter_mut())
        {
            for (buffer_idx, &param_idx) in layout.iter().enumerate() {
                buffer[buffer_idx] = parameters[param_idx];
            }
        }
        let likelihood_values = LikelihoodValues(
            self.likelihood_manager
                .terms
                .iter()
                .zip(param_buffers.iter())
                .map(|(term, buffer)| term.evaluate(buffer))
                .collect(),
        );
        let mut gradient_buffers: Vec<DVector<Float>> = (0..self.likelihood_manager.terms.len())
            .map(|_| DVector::zeros(self.likelihood_manager.param_names.len()))
            .collect();
        for (((term, param_buffer), gradient_buffer), layout) in self
            .likelihood_manager
            .terms
            .iter()
            .zip(param_buffers.iter())
            .zip(gradient_buffers.iter_mut())
            .zip(self.likelihood_manager.param_layouts.iter())
        {
            let term_gradient = term.evaluate_gradient(param_buffer);
            for (term_idx, &buffer_idx) in layout.iter().enumerate() {
                gradient_buffer[buffer_idx] = term_gradient[term_idx];
            }
        }
        let likelihood_gradients = LikelihoodGradients(gradient_buffers);
        Ok(self
            .likelihood_expression
            .evaluate_gradient(&likelihood_values, &likelihood_gradients))
    }

    pub fn minimize(
        &self,
        p0: &[Float],
        bounds: Option<Vec<(Float, Float)>>,
        options: Option<MinimizerOptions>,
    ) -> Result<Status, LadduError> {
        let options = options.unwrap_or_default();
        let mut m = Minimizer::new(options.algorithm, self.parameters().len())
            .with_bounds(bounds)
            .with_max_steps(options.max_steps);
        for observer in options.observers {
            m = m.with_observer(observer)
        }
        #[cfg(feature = "rayon")]
        {
            m.minimize(
                self,
                p0,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.minimize(self, p0, &mut ())?;
        }
        Ok(m.status)
    }

    pub fn mcmc<T: AsRef<[DVector<Float>]>>(
        &self,
        p0: T,
        n_steps: usize,
        options: Option<MCMCOptions>,
        rng: Rng,
    ) -> Result<Ensemble, LadduError> {
        let options = options.unwrap_or(MCMCOptions::new_ess(
            [ESSMove::differential(0.9), ESSMove::gaussian(0.1)],
            rng,
        ));
        let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        let func = NegativeLikelihoodEvaluator(self);
        #[cfg(feature = "rayon")]
        {
            m.sample(
                &func,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                n_steps,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.sample(&func, &mut (), n_steps)?;
        }
        Ok(m.ensemble)
    }
}

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodEvaluator", module = "laddu")]
pub struct PyLikelihoodEvaluator(LikelihoodEvaluator);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodEvaluator {
    #[getter]
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
        #[cfg(feature = "rayon")]
        {
            Ok(ThreadPoolBuilder::new()
                .num_threads(threads.unwrap_or_else(num_cpus::get))
                .build()
                .map_err(LadduError::from)?
                .install(|| self.0.evaluate(&parameters))?)
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(self.0.evaluate(&parameters)?)
        }
    }
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate_gradient<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| self.0.evaluate_gradient(&parameters))?
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0.evaluate_gradient(&parameters)?.as_slice(),
            ))
        }
    }

    #[pyo3(signature = (p0, *, bounds=None, method="lbfgsb", max_steps=4000, debug=false, verbose=false, **kwargs))]
    #[allow(clippy::too_many_arguments)]
    fn minimize(
        &self,
        p0: Vec<Float>,
        bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
        method: &str,
        max_steps: usize,
        debug: bool,
        verbose: bool,
        kwargs: Option<&Bound<'_, PyDict>>,
    ) -> PyResult<PyStatus> {
        let bounds = bounds.map(|bounds_vec| {
            bounds_vec
                .iter()
                .map(|(opt_lb, opt_ub)| {
                    (
                        opt_lb.unwrap_or(Float::NEG_INFINITY),
                        opt_ub.unwrap_or(Float::INFINITY),
                    )
                })
                .collect()
        });
        let n_parameters = p0.len();
        let options =
            py_parse_minimizer_options(n_parameters, method, max_steps, debug, verbose, kwargs)?;
        let status = self.0.minimize(&p0, bounds, Some(options))?;
        Ok(PyStatus(status))
    }

    #[pyo3(signature = (p0, n_steps, *, method="ESS", debug=false, verbose=false, seed=0, **kwargs))]
    #[allow(clippy::too_many_arguments)]
    fn mcmc(
        &self,
        p0: Vec<Vec<Float>>,
        n_steps: usize,
        method: &str,
        debug: bool,
        verbose: bool,
        seed: u64,
        kwargs: Option<&Bound<'_, PyDict>>,
    ) -> PyResult<PyEnsemble> {
        let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
        let mut rng = Rng::new();
        rng.seed(seed);
        let options = py_parse_mcmc_options(method, debug, verbose, kwargs, rng.clone())?;
        let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
        Ok(PyEnsemble(ensemble))
    }
}

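/// A [`LikelihoodTerm`] which evaluates to a single free parameter, useful as a scale
/// factor in likelihood products. A brief sketch (hedged: `manager` and `nll_id` are
/// assumed to come from earlier registrations, as in the [`LikelihoodManager`] docs):
///
/// ```ignore
/// let scale = manager.register(LikelihoodScalar::new("scale"));
/// let expression = scale * nll_id; // multiplies the NLL term by the "scale" parameter
/// ```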
#[derive(Clone)]
pub struct LikelihoodScalar(String);

impl LikelihoodScalar {
    pub fn new<T: AsRef<str>>(name: T) -> Box<Self> {
        Self(name.as_ref().into()).into()
    }
}

impl LikelihoodTerm for LikelihoodScalar {
    fn evaluate(&self, parameters: &[Float]) -> Float {
        parameters[0]
    }

    fn evaluate_gradient(&self, _parameters: &[Float]) -> DVector<Float> {
        DVector::from_vec(vec![1.0])
    }

    fn parameters(&self) -> Vec<String> {
        vec![self.0.clone()]
    }
}

#[cfg(feature = "python")]
#[pyfunction(name = "LikelihoodScalar")]
pub fn py_likelihood_scalar(name: String) -> PyLikelihoodTerm {
    PyLikelihoodTerm(LikelihoodScalar::new(name))
}