use std::{
    collections::HashMap,
    fmt::{Debug, Display},
    sync::Arc,
};

use accurate::{sum::Klein, traits::*};
use auto_ops::*;
use dyn_clone::DynClone;
use fastrand::Rng;
use ganesh::{
    mcmc::{ESSMove, Ensemble},
    Function, Minimizer, Sampler, Status,
};
use laddu_core::{
    amplitudes::{central_difference, AmplitudeValues, Evaluator, GradientValues, Model},
    data::Dataset,
    resources::Parameters,
    Complex, DVector, Float, LadduError,
};

#[cfg(feature = "mpi")]
use laddu_core::mpi::LadduMPI;

#[cfg(feature = "mpi")]
use mpi::{datatype::PartitionMut, topology::SimpleCommunicator, traits::*};

#[cfg(feature = "python")]
use crate::ganesh_ext::py_ganesh::{
    py_parse_mcmc_options, py_parse_minimizer_options, PyEnsemble, PyStatus,
};
#[cfg(feature = "python")]
use laddu_python::{
    amplitudes::{PyEvaluator, PyModel},
    data::PyDataset,
};
#[cfg(feature = "python")]
use numpy::PyArray1;
#[cfg(feature = "python")]
use pyo3::{
    exceptions::PyTypeError,
    prelude::*,
    types::{PyDict, PyList},
};
#[cfg(feature = "rayon")]
use rayon::{prelude::*, ThreadPool, ThreadPoolBuilder};

use crate::ganesh_ext::{MCMCOptions, MinimizerOptions};

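/// A clonable, thread-safe term in a likelihood which can be evaluated over a set of named
/// parameters. Implementors must supply [`LikelihoodTerm::evaluate`] and
/// [`LikelihoodTerm::parameters`]; `evaluate_gradient` defaults to a central finite
/// difference of `evaluate`.
///
/// Illustrative sketch (not a doctest): a term returning the square of one parameter,
/// relying on the default numerical gradient.
///
/// ```rust,ignore
/// #[derive(Clone)]
/// struct Quadratic;
///
/// impl LikelihoodTerm for Quadratic {
///     fn evaluate(&self, parameters: &[Float]) -> Float {
///         parameters[0] * parameters[0]
///     }
///     fn parameters(&self) -> Vec<String> {
///         vec!["x".to_string()]
///     }
/// }
/// ```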
pub trait LikelihoodTerm: DynClone + Send + Sync {
    fn evaluate(&self, parameters: &[Float]) -> Float;
    fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
        central_difference(parameters, |parameters: &[Float]| self.evaluate(parameters))
    }
    fn parameters(&self) -> Vec<String>;
}

dyn_clone::clone_trait_object!(LikelihoodTerm);

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodTerm", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodTerm(pub Box<dyn LikelihoodTerm>);

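/// An extended, unbinned negative log-likelihood term built from a data [`Dataset`] and an
/// accepted Monte Carlo [`Dataset`], both evaluated with the same [`Model`]. Up to an
/// additive constant (see `evaluate_local`),
///
/// ```text
/// NLL(p) = -2 [ Σ_{e ∈ data} w_e ln L(e; p) - (1 / N_MC) Σ_{e ∈ accmc} w_e L(e; p) ]
/// ```
///
/// where `L(e; p)` is the real part of the model intensity for event `e`, `w_e` is the
/// event weight, and `N_MC` is the number of accepted Monte Carlo events.
///
/// Illustrative sketch (not a doctest; assumes `model`, `data`, and `accmc` were built
/// elsewhere):
///
/// ```rust,ignore
/// let nll = NLL::new(&model, &data, &accmc);
/// let value = LikelihoodTerm::evaluate(nll.as_ref(), &[1.0, 2.0]);
/// ```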
#[derive(Clone)]
pub struct NLL {
    pub data_evaluator: Evaluator,
    pub accmc_evaluator: Evaluator,
}

impl NLL {
    pub fn new(model: &Model, ds_data: &Arc<Dataset>, ds_accmc: &Arc<Dataset>) -> Box<Self> {
        Self {
            data_evaluator: model.load(ds_data),
            accmc_evaluator: model.load(ds_accmc),
        }
        .into()
    }
    pub fn activate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.activate(&name)?;
        self.accmc_evaluator.activate(&name)
    }
    pub fn activate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.activate_many(names)?;
        self.accmc_evaluator.activate_many(names)
    }
    pub fn activate_all(&self) {
        self.data_evaluator.activate_all();
        self.accmc_evaluator.activate_all();
    }
    pub fn deactivate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.deactivate(&name)?;
        self.accmc_evaluator.deactivate(&name)
    }
    pub fn deactivate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.deactivate_many(names)?;
        self.accmc_evaluator.deactivate_many(names)
    }
    pub fn deactivate_all(&self) {
        self.data_evaluator.deactivate_all();
        self.accmc_evaluator.deactivate_all();
    }
    pub fn isolate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.isolate(&name)?;
        self.accmc_evaluator.isolate(&name)
    }
    pub fn isolate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.isolate_many(names)?;
        self.accmc_evaluator.isolate_many(names)
    }

    pub fn project_local(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> Vec<Float> {
        let (mc_dataset, result) = if let Some(mc_evaluator) = mc_evaluator {
            (
                mc_evaluator.dataset.clone(),
                mc_evaluator.evaluate_local(parameters),
            )
        } else {
            (
                self.accmc_evaluator.dataset.clone(),
                self.accmc_evaluator.evaluate_local(parameters),
            )
        };
        let n_mc = self.accmc_evaluator.dataset.n_events();
        #[cfg(feature = "rayon")]
        let output: Vec<Float> = result
            .par_iter()
            .zip(mc_dataset.events.par_iter())
            .map(|(l, e)| e.weight * l.re / n_mc as Float)
            .collect();

        #[cfg(not(feature = "rayon"))]
        let output: Vec<Float> = result
            .iter()
            .zip(mc_dataset.events.iter())
            .map(|(l, e)| e.weight * l.re / n_mc as Float)
            .collect();
        output
    }

    #[cfg(feature = "mpi")]
    pub fn project_mpi(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Vec<Float> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let local_projection = self.project_local(parameters, mc_evaluator);
        let mut buffer: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }
        buffer
    }

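    /// Project the model onto a Monte Carlo dataset, returning one weighted intensity per
    /// event, `w_e * Re L(e; p) / N_MC`. If `mc_evaluator` is `None`, the accepted Monte
    /// Carlo evaluator stored in this [`NLL`] is used; under MPI the projection is gathered
    /// across ranks, otherwise it is computed locally.
    ///
    /// Illustrative sketch (not a doctest; `best_fit` is a placeholder parameter slice):
    ///
    /// ```rust,ignore
    /// let weights = nll.project(&best_fit, None);
    /// assert_eq!(weights.len(), nll.accmc_evaluator.dataset.n_events());
    /// ```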
    pub fn project(&self, parameters: &[Float], mc_evaluator: Option<Evaluator>) -> Vec<Float> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_mpi(parameters, mc_evaluator, &world);
            }
        }
        self.project_local(parameters, mc_evaluator)
    }

    pub fn project_gradient_local(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        let (mc_dataset, result, result_gradient) = if let Some(mc_evaluator) = mc_evaluator {
            (
                mc_evaluator.dataset.clone(),
                mc_evaluator.evaluate_local(parameters),
                mc_evaluator.evaluate_gradient_local(parameters),
            )
        } else {
            (
                self.accmc_evaluator.dataset.clone(),
                self.accmc_evaluator.evaluate_local(parameters),
                self.accmc_evaluator.evaluate_gradient_local(parameters),
            )
        };
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        {
            (
                result
                    .par_iter()
                    .zip(mc_dataset.events.par_iter())
                    .map(|(l, e)| e.weight * l.re / n_mc)
                    .collect(),
                result_gradient
                    .par_iter()
                    .zip(mc_dataset.events.par_iter())
                    .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                    .collect(),
            )
        }
        #[cfg(not(feature = "rayon"))]
        {
            (
                result
                    .iter()
                    .zip(mc_dataset.events.iter())
                    .map(|(l, e)| e.weight * l.re / n_mc)
                    .collect(),
                result_gradient
                    .iter()
                    .zip(mc_dataset.events.iter())
                    .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                    .collect(),
            )
        }
    }

    #[cfg(feature = "mpi")]
    pub fn project_gradient_mpi(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let (local_projection, local_gradient_projection) =
            self.project_gradient_local(parameters, mc_evaluator);
        let mut projection_result: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }

        let flattened_local_gradient_projection = local_gradient_projection
            .iter()
            .flat_map(|g| g.data.as_vec().to_vec())
            .collect::<Vec<Float>>();
        let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
        let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
        let mut partitioned_flattened_result_buffer =
            PartitionMut::new(&mut flattened_result_buffer, counts, displs);
        world.all_gather_varcount_into(
            &flattened_local_gradient_projection,
            &mut partitioned_flattened_result_buffer,
        );
        let gradient_projection_result = flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .collect();
        (projection_result, gradient_projection_result)
    }

    pub fn project_gradient(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_gradient_mpi(parameters, mc_evaluator, &world);
            }
        }
        self.project_gradient_local(parameters, mc_evaluator)
    }

    pub fn project_with_local<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<Vec<Float>, LadduError> {
        if let Some(mc_evaluator) = &mc_evaluator {
            let current_active_mc = mc_evaluator.resources.read().active.clone();
            mc_evaluator.isolate_many(names)?;
            let mc_dataset = mc_evaluator.dataset.clone();
            let result = mc_evaluator.evaluate_local(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events();
            #[cfg(feature = "rayon")]
            let output: Vec<Float> = result
                .par_iter()
                .zip(mc_dataset.events.par_iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            #[cfg(not(feature = "rayon"))]
            let output: Vec<Float> = result
                .iter()
                .zip(mc_dataset.events.iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            mc_evaluator.resources.write().active = current_active_mc;
            Ok(output)
        } else {
            let current_active_data = self.data_evaluator.resources.read().active.clone();
            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
            self.isolate_many(names)?;
            let mc_dataset = &self.accmc_evaluator.dataset;
            let result = self.accmc_evaluator.evaluate_local(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events();
            #[cfg(feature = "rayon")]
            let output: Vec<Float> = result
                .par_iter()
                .zip(mc_dataset.events.par_iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            #[cfg(not(feature = "rayon"))]
            let output: Vec<Float> = result
                .iter()
                .zip(mc_dataset.events.iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            self.data_evaluator.resources.write().active = current_active_data;
            self.accmc_evaluator.resources.write().active = current_active_accmc;
            Ok(output)
        }
    }

    #[cfg(feature = "mpi")]
    pub fn project_with_mpi<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Result<Vec<Float>, LadduError> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let local_projection = self.project_with_local(parameters, names, mc_evaluator)?;
        let mut buffer: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }
        Ok(buffer)
    }

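    /// Like [`NLL::project`], but temporarily isolates the named amplitudes before
    /// projecting and restores the previous activation state afterwards. This is useful for
    /// plotting the contribution of a single wave or coherent sum.
    ///
    /// Illustrative sketch (not a doctest; `"S-wave"` is a placeholder amplitude name):
    ///
    /// ```rust,ignore
    /// let s_wave_weights = nll.project_with(&best_fit, &["S-wave"], None)?;
    /// ```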
    pub fn project_with<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<Vec<Float>, LadduError> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_with_mpi(parameters, names, mc_evaluator, &world);
            }
        }
        self.project_with_local(parameters, names, mc_evaluator)
    }

    pub fn project_gradient_with_local<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        if let Some(mc_evaluator) = &mc_evaluator {
            let current_active_mc = mc_evaluator.resources.read().active.clone();
            mc_evaluator.isolate_many(names)?;
            let mc_dataset = mc_evaluator.dataset.clone();
            let result = mc_evaluator.evaluate_local(parameters);
            let result_gradient = mc_evaluator.evaluate_gradient(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
            #[cfg(feature = "rayon")]
            let (res, res_gradient) = {
                (
                    result
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            #[cfg(not(feature = "rayon"))]
            let (res, res_gradient) = {
                (
                    result
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            mc_evaluator.resources.write().active = current_active_mc;
            Ok((res, res_gradient))
        } else {
            let current_active_data = self.data_evaluator.resources.read().active.clone();
            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
            self.isolate_many(names)?;
            let mc_dataset = &self.accmc_evaluator.dataset;
            let result = self.accmc_evaluator.evaluate_local(parameters);
            let result_gradient = self.accmc_evaluator.evaluate_gradient(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
            #[cfg(feature = "rayon")]
            let (res, res_gradient) = {
                (
                    result
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            #[cfg(not(feature = "rayon"))]
            let (res, res_gradient) = {
                (
                    result
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            self.data_evaluator.resources.write().active = current_active_data;
            self.accmc_evaluator.resources.write().active = current_active_accmc;
            Ok((res, res_gradient))
        }
    }

    #[cfg(feature = "mpi")]
    pub fn project_gradient_with_mpi<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let (local_projection, local_gradient_projection) =
            self.project_gradient_with_local(parameters, names, mc_evaluator)?;
        let mut projection_result: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }

        let flattened_local_gradient_projection = local_gradient_projection
            .iter()
            .flat_map(|g| g.data.as_vec().to_vec())
            .collect::<Vec<Float>>();
        let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
        let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
        let mut partitioned_flattened_result_buffer =
            PartitionMut::new(&mut flattened_result_buffer, counts, displs);
        world.all_gather_varcount_into(
            &flattened_local_gradient_projection,
            &mut partitioned_flattened_result_buffer,
        );
        let gradient_projection_result = flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .collect();
        Ok((projection_result, gradient_projection_result))
    }

    pub fn project_gradient_with<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_gradient_with_mpi(parameters, names, mc_evaluator, &world);
            }
        }
        self.project_gradient_with_local(parameters, names, mc_evaluator)
    }

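    /// Local (per-process) evaluation of the NLL given in the struct-level docs. Both sums
    /// use Klein compensated accumulation to limit floating-point round-off over large
    /// datasets; under MPI, `evaluate_mpi` adds up these local results across ranks.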
    fn evaluate_local(&self, parameters: &[Float]) -> Float {
        let data_result = self.data_evaluator.evaluate_local(parameters);
        let mc_result = self.accmc_evaluator.evaluate_local(parameters);
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        let data_term: Float = data_result
            .par_iter()
            .zip(self.data_evaluator.dataset.events.par_iter())
            .map(|(l, e)| e.weight * Float::ln(l.re))
            .parallel_sum_with_accumulator::<Klein<Float>>();
        #[cfg(feature = "rayon")]
        let mc_term: Float = mc_result
            .par_iter()
            .zip(self.accmc_evaluator.dataset.events.par_iter())
            .map(|(l, e)| e.weight * l.re)
            .parallel_sum_with_accumulator::<Klein<Float>>();
        #[cfg(not(feature = "rayon"))]
        let data_term: Float = data_result
            .iter()
            .zip(self.data_evaluator.dataset.events.iter())
            .map(|(l, e)| e.weight * Float::ln(l.re))
            .sum_with_accumulator::<Klein<Float>>();
        #[cfg(not(feature = "rayon"))]
        let mc_term: Float = mc_result
            .iter()
            .zip(self.accmc_evaluator.dataset.events.iter())
            .map(|(l, e)| e.weight * l.re)
            .sum_with_accumulator::<Klein<Float>>();
        -2.0 * (data_term - mc_term / n_mc)
    }

    #[cfg(feature = "mpi")]
    fn evaluate_mpi(&self, parameters: &[Float], world: &SimpleCommunicator) -> Float {
        let local_evaluation = self.evaluate_local(parameters);
        let mut buffer: Vec<Float> = vec![0.0; world.size() as usize];
        world.all_gather_into(&local_evaluation, &mut buffer);
        buffer.iter().sum()
    }

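    /// Local analytic gradient of the NLL. Differentiating the expression in the
    /// struct-level docs term by term gives
    ///
    /// ```text
    /// ∇NLL(p) = -2 [ Σ_{e ∈ data} w_e ∇L(e; p) / L(e; p) - (1 / N_MC) Σ_{e ∈ accmc} w_e ∇L(e; p) ]
    /// ```
    ///
    /// which the two sums below assemble from per-amplitude values and gradients.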
    fn evaluate_gradient_local(&self, parameters: &[Float]) -> DVector<Float> {
        let data_resources = self.data_evaluator.resources.read();
        let data_parameters = Parameters::new(parameters, &data_resources.constants);
        let mc_resources = self.accmc_evaluator.resources.read();
        let mc_parameters = Parameters::new(parameters, &mc_resources.constants);
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        let data_term: DVector<Float> = self
            .data_evaluator
            .dataset
            .events
            .par_iter()
            .zip(data_resources.caches.par_iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
                self.data_evaluator
                    .amplitudes
                    .iter()
                    .zip(data_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&data_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.data_evaluator
                            .amplitudes
                            .iter()
                            .zip(data_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&data_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.data_evaluator.expression.evaluate(&amp_vals),
                    self.data_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
            .collect::<Vec<DVector<Float>>>()
            .iter()
            .sum();
        #[cfg(feature = "rayon")]
        let mc_term: DVector<Float> = self
            .accmc_evaluator
            .dataset
            .events
            .par_iter()
            .zip(mc_resources.caches.par_iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
                self.accmc_evaluator
                    .amplitudes
                    .iter()
                    .zip(mc_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&mc_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.accmc_evaluator
                            .amplitudes
                            .iter()
                            .zip(mc_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&mc_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.accmc_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, g)| w * g.map(|gi| gi.re))
            .collect::<Vec<DVector<Float>>>()
            .iter()
            .sum();
        #[cfg(not(feature = "rayon"))]
        let data_term: DVector<Float> = self
            .data_evaluator
            .dataset
            .events
            .iter()
            .zip(data_resources.caches.iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
                self.data_evaluator
                    .amplitudes
                    .iter()
                    .zip(data_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&data_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.data_evaluator
                            .amplitudes
                            .iter()
                            .zip(data_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&data_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.data_evaluator.expression.evaluate(&amp_vals),
                    self.data_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
            .sum();
        #[cfg(not(feature = "rayon"))]
        let mc_term: DVector<Float> = self
            .accmc_evaluator
            .dataset
            .events
            .iter()
            .zip(mc_resources.caches.iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
                self.accmc_evaluator
                    .amplitudes
                    .iter()
                    .zip(mc_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&mc_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.accmc_evaluator
                            .amplitudes
                            .iter()
                            .zip(mc_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&mc_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.accmc_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, g)| w * g.map(|gi| gi.re))
            .sum();
        -2.0 * (data_term - mc_term / n_mc)
    }

    #[cfg(feature = "mpi")]
    fn evaluate_gradient_mpi(
        &self,
        parameters: &[Float],
        world: &SimpleCommunicator,
    ) -> DVector<Float> {
        let local_evaluation_vec = self
            .evaluate_gradient_local(parameters)
            .data
            .as_vec()
            .to_vec();
        let mut flattened_result_buffer = vec![0.0; world.size() as usize * parameters.len()];
        world.all_gather_into(&local_evaluation_vec, &mut flattened_result_buffer);
        flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .sum::<DVector<Float>>()
    }
}

impl LikelihoodTerm for NLL {
    fn parameters(&self) -> Vec<String> {
        self.data_evaluator
            .resources
            .read()
            .parameters
            .iter()
            .cloned()
            .collect()
    }

    fn evaluate(&self, parameters: &[Float]) -> Float {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.evaluate_mpi(parameters, &world);
            }
        }
        self.evaluate_local(parameters)
    }

    fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.evaluate_gradient_mpi(parameters, &world);
            }
        }
        self.evaluate_gradient_local(parameters)
    }
}

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for NLL {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Ok(thread_pool.install(|| LikelihoodTerm::evaluate(self, parameters)))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Ok(thread_pool.install(|| LikelihoodTerm::evaluate_gradient(self, parameters)))
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for NLL {
    fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
        Ok(LikelihoodTerm::evaluate(self, parameters))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        _user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Ok(LikelihoodTerm::evaluate_gradient(self, parameters))
    }
}

pub(crate) struct LogLikelihood<'a>(&'a NLL);

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for LogLikelihood<'_> {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for LogLikelihood<'_> {
    fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, user_data).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, user_data).map(|res| -res)
    }
}

impl NLL {
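    /// Minimize this NLL over its free parameters starting from `p0`, using the algorithm,
    /// observers, and step limit in `options` (falling back to [`MinimizerOptions::default`]).
    ///
    /// Illustrative sketch (not a doctest; assumes an `nll` with two parameters):
    ///
    /// ```rust,ignore
    /// let status = nll.minimize(&[0.5, 0.5], None, None)?;
    /// println!("best fit: {}", status.x);
    /// ```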
    pub fn minimize(
        &self,
        p0: &[Float],
        bounds: Option<Vec<(Float, Float)>>,
        options: Option<MinimizerOptions>,
    ) -> Result<Status, LadduError> {
        let options = options.unwrap_or_default();
        let mut m = Minimizer::new(options.algorithm, self.parameters().len())
            .with_bounds(bounds)
            .with_max_steps(options.max_steps);
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        #[cfg(feature = "rayon")]
        {
            m.minimize(
                self,
                p0,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.minimize(self, p0, &mut ())?;
        }
        Ok(m.status)
    }
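    /// Run an MCMC sampler for `n_steps` steps from the initial ensemble `p0` (one position
    /// per walker), using the negated NLL as the log-probability. If `options` is `None`,
    /// an ensemble slice sampler with a 90%/10% mix of differential and Gaussian moves is
    /// used, as constructed below.
    ///
    /// Illustrative sketch (not a doctest; walker positions are placeholders):
    ///
    /// ```rust,ignore
    /// let mut rng = Rng::new();
    /// rng.seed(0);
    /// let p0 = vec![
    ///     DVector::from_vec(vec![0.9, 1.1]),
    ///     DVector::from_vec(vec![1.0, 0.9]),
    ///     DVector::from_vec(vec![1.1, 1.0]),
    /// ];
    /// let ensemble = nll.mcmc(&p0, 1000, None, rng)?;
    /// ```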
    pub fn mcmc<T: AsRef<[DVector<Float>]>>(
        &self,
        p0: T,
        n_steps: usize,
        options: Option<MCMCOptions>,
        rng: Rng,
    ) -> Result<Ensemble, LadduError> {
        let options = options.unwrap_or(MCMCOptions::new_ess(
            [ESSMove::differential(0.9), ESSMove::gaussian(0.1)],
            rng,
        ));
        let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        let func = LogLikelihood(self);
        #[cfg(feature = "rayon")]
        {
            m.sample(
                &func,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                n_steps,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.sample(&func, &mut (), n_steps)?;
        }
        Ok(m.ensemble)
    }
}

#[cfg(feature = "python")]
#[pyclass(name = "NLL", module = "laddu")]
#[derive(Clone)]
pub struct PyNLL(pub Box<NLL>);

#[cfg(feature = "python")]
#[pymethods]
impl PyNLL {
    #[new]
    #[pyo3(signature = (model, ds_data, ds_accmc))]
    fn new(model: &PyModel, ds_data: &PyDataset, ds_accmc: &PyDataset) -> Self {
        Self(NLL::new(&model.0, &ds_data.0, &ds_accmc.0))
    }
    #[getter]
    fn data(&self) -> PyDataset {
        PyDataset(self.0.data_evaluator.dataset.clone())
    }
    #[getter]
    fn accmc(&self) -> PyDataset {
        PyDataset(self.0.accmc_evaluator.dataset.clone())
    }
    fn as_term(&self) -> PyLikelihoodTerm {
        PyLikelihoodTerm(self.0.clone())
    }
    #[getter]
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    fn activate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.activate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.activate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    fn activate_all(&self) {
        self.0.activate_all();
    }
    fn deactivate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.deactivate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.deactivate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    fn deactivate_all(&self) {
        self.0.deactivate_all();
    }
    fn isolate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.isolate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.isolate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
        #[cfg(feature = "rayon")]
        {
            Ok(ThreadPoolBuilder::new()
                .num_threads(threads.unwrap_or_else(num_cpus::get))
                .build()
                .map_err(LadduError::from)?
                .install(|| LikelihoodTerm::evaluate(self.0.as_ref(), &parameters)))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(LikelihoodTerm::evaluate(self.0.as_ref(), &parameters))
        }
    }
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate_gradient<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| self.0.evaluate_gradient(&parameters))
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0.evaluate_gradient(&parameters).as_slice(),
            ))
        }
    }
    #[pyo3(signature = (parameters, *, mc_evaluator = None, threads=None))]
    fn project<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        mc_evaluator: Option<PyEvaluator>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| {
                        self.0
                            .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
                    })
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0
                    .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
                    .as_slice(),
            ))
        }
    }

    #[pyo3(signature = (parameters, arg, *, mc_evaluator = None, threads=None))]
    fn project_with<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        arg: &Bound<'_, PyAny>,
        mc_evaluator: Option<PyEvaluator>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        let names = if let Ok(string_arg) = arg.extract::<String>() {
            vec![string_arg]
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            vec
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        };
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| {
                        self.0.project_with(
                            &parameters,
                            &names,
                            mc_evaluator.map(|pyeval| pyeval.0.clone()),
                        )
                    })?
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0
                    .project_with(
                        &parameters,
                        &names,
                        mc_evaluator.map(|pyeval| pyeval.0.clone()),
                    )?
                    .as_slice(),
            ))
        }
    }

    #[pyo3(signature = (p0, *, bounds=None, method="lbfgsb", max_steps=4000, debug=false, verbose=false, **kwargs))]
    #[allow(clippy::too_many_arguments)]
    fn minimize(
        &self,
        p0: Vec<Float>,
        bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
        method: &str,
        max_steps: usize,
        debug: bool,
        verbose: bool,
        kwargs: Option<&Bound<'_, PyDict>>,
    ) -> PyResult<PyStatus> {
        let bounds = bounds.map(|bounds_vec| {
            bounds_vec
                .iter()
                .map(|(opt_lb, opt_ub)| {
                    (
                        opt_lb.unwrap_or(Float::NEG_INFINITY),
                        opt_ub.unwrap_or(Float::INFINITY),
                    )
                })
                .collect()
        });
        let n_parameters = p0.len();
        let options =
            py_parse_minimizer_options(n_parameters, method, max_steps, debug, verbose, kwargs)?;
        let status = self.0.minimize(&p0, bounds, Some(options))?;
        Ok(PyStatus(status))
    }
    #[pyo3(signature = (p0, n_steps, *, method="ESS", debug=false, verbose=false, seed=0, **kwargs))]
    #[allow(clippy::too_many_arguments)]
    fn mcmc(
        &self,
        p0: Vec<Vec<Float>>,
        n_steps: usize,
        method: &str,
        debug: bool,
        verbose: bool,
        seed: u64,
        kwargs: Option<&Bound<'_, PyDict>>,
    ) -> PyResult<PyEnsemble> {
        let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
        let mut rng = Rng::new();
        rng.seed(seed);
        let options = py_parse_mcmc_options(method, debug, verbose, kwargs, rng.clone())?;
        let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
        Ok(PyEnsemble(ensemble))
    }
}

#[derive(Clone, Debug)]
pub struct LikelihoodID(usize);

impl Display for LikelihoodID {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodID", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodID(LikelihoodID);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodID {
    fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() + other_expr.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                    self.0.clone(),
                )))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                other_expr.0.clone() + self.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                    self.0.clone(),
                )))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() * other_expr.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(other_aid.0.clone() * self.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                other_expr.0.clone() * self.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __str__(&self) -> String {
        format!("{}", self.0)
    }
    fn __repr__(&self) -> String {
        format!("{:?}", self.0)
    }
}

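/// Collects [`LikelihoodTerm`]s and merges their named parameters into a single flat
/// parameter list. Parameters with the same name are shared between terms; each term gets a
/// layout mapping its own parameter slots into the combined list.
///
/// Illustrative sketch (not a doctest; assumes two boxed NLL terms built elsewhere):
///
/// ```rust,ignore
/// let mut manager = LikelihoodManager::default();
/// let lid_a = manager.register(nll_a);
/// let lid_b = manager.register(nll_b);
/// let evaluator = manager.load(&(lid_a + lid_b));
/// ```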
#[derive(Default, Clone)]
pub struct LikelihoodManager {
    terms: Vec<Box<dyn LikelihoodTerm>>,
    param_name_to_index: HashMap<String, usize>,
    param_names: Vec<String>,
    param_layouts: Vec<Vec<usize>>,
    param_counts: Vec<usize>,
}

impl LikelihoodManager {
    pub fn register(&mut self, term: Box<dyn LikelihoodTerm>) -> LikelihoodID {
        let term_idx = self.terms.len();
        for param_name in term.parameters() {
            if !self.param_name_to_index.contains_key(&param_name) {
                self.param_name_to_index
                    .insert(param_name.clone(), self.param_name_to_index.len());
                self.param_names.push(param_name.clone());
            }
        }
        let param_layout: Vec<usize> = term
            .parameters()
            .iter()
            .map(|name| self.param_name_to_index[name])
            .collect();
        let param_count = term.parameters().len();
        self.param_layouts.push(param_layout);
        self.param_counts.push(param_count);
        self.terms.push(term.clone());

        LikelihoodID(term_idx)
    }

    pub fn parameters(&self) -> Vec<String> {
        self.param_names.clone()
    }

    pub fn load(&self, likelihood_expression: &LikelihoodExpression) -> LikelihoodEvaluator {
        LikelihoodEvaluator {
            likelihood_manager: self.clone(),
            likelihood_expression: likelihood_expression.clone(),
        }
    }
}

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodManager", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodManager(LikelihoodManager);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodManager {
    #[new]
    fn new() -> Self {
        Self(LikelihoodManager::default())
    }
    fn register(&mut self, likelihood_term: &PyLikelihoodTerm) -> PyLikelihoodID {
        PyLikelihoodID(self.0.register(likelihood_term.0.clone()))
    }
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    fn load(&self, likelihood_expression: &PyLikelihoodExpression) -> PyLikelihoodEvaluator {
        PyLikelihoodEvaluator(self.0.load(&likelihood_expression.0))
    }
}

#[derive(Debug)]
struct LikelihoodValues(Vec<Float>);

#[derive(Debug)]
struct LikelihoodGradients(Vec<DVector<Float>>);

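/// A combination of registered likelihood terms under addition and multiplication, built
/// with the `+` and `*` operators on [`LikelihoodID`]s and other expressions. Gradients of
/// products follow the product rule,
///
/// ```text
/// ∇(a * b) = (∇a) * b + (∇b) * a
/// ```
///
/// as implemented in `evaluate_gradient` below.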
#[derive(Clone)]
pub enum LikelihoodExpression {
    Term(LikelihoodID),
    Add(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
    Mul(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
}

impl Debug for LikelihoodExpression {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.write_tree(f, "", "", "")
    }
}

impl Display for LikelihoodExpression {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl LikelihoodExpression {
    fn evaluate(&self, likelihood_values: &LikelihoodValues) -> Float {
        match self {
            LikelihoodExpression::Term(lid) => likelihood_values.0[lid.0],
            LikelihoodExpression::Add(a, b) => {
                a.evaluate(likelihood_values) + b.evaluate(likelihood_values)
            }
            LikelihoodExpression::Mul(a, b) => {
                a.evaluate(likelihood_values) * b.evaluate(likelihood_values)
            }
        }
    }
    fn evaluate_gradient(
        &self,
        likelihood_values: &LikelihoodValues,
        likelihood_gradients: &LikelihoodGradients,
    ) -> DVector<Float> {
        match self {
            LikelihoodExpression::Term(lid) => likelihood_gradients.0[lid.0].clone(),
            LikelihoodExpression::Add(a, b) => {
                a.evaluate_gradient(likelihood_values, likelihood_gradients)
                    + b.evaluate_gradient(likelihood_values, likelihood_gradients)
            }
            LikelihoodExpression::Mul(a, b) => {
                a.evaluate_gradient(likelihood_values, likelihood_gradients)
                    * b.evaluate(likelihood_values)
                    + b.evaluate_gradient(likelihood_values, likelihood_gradients)
                        * a.evaluate(likelihood_values)
            }
        }
    }
    fn write_tree(
        &self,
        f: &mut std::fmt::Formatter<'_>,
        parent_prefix: &str,
        immediate_prefix: &str,
        parent_suffix: &str,
    ) -> std::fmt::Result {
        let display_string = match self {
            Self::Term(lid) => format!("{}", lid.0),
            Self::Add(_, _) => "+".to_string(),
            Self::Mul(_, _) => "*".to_string(),
        };
        writeln!(f, "{}{}{}", parent_prefix, immediate_prefix, display_string)?;
        match self {
            Self::Term(_) => {}
            Self::Add(a, b) | Self::Mul(a, b) => {
                let terms = [a, b];
                let mut it = terms.iter().peekable();
                let child_prefix = format!("{}{}", parent_prefix, parent_suffix);
                while let Some(child) = it.next() {
                    match it.peek() {
                        Some(_) => child.write_tree(f, &child_prefix, "├─ ", "│  "),
                        None => child.write_tree(f, &child_prefix, "└─ ", "   "),
                    }?;
                }
            }
        }
        Ok(())
    }
}

impl_op_ex!(+ |a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(a.clone()), Box::new(b.clone()))});
impl_op_ex!(
    *|a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression {
        LikelihoodExpression::Mul(Box::new(a.clone()), Box::new(b.clone()))
    }
);
impl_op_ex_commutative!(+ |a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(b.clone()))});
impl_op_ex_commutative!(
    *|a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression {
        LikelihoodExpression::Mul(
            Box::new(LikelihoodExpression::Term(a.clone())),
            Box::new(b.clone()),
        )
    }
);
impl_op_ex!(+ |a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(LikelihoodExpression::Term(b.clone())))});
impl_op_ex!(
    *|a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression {
        LikelihoodExpression::Mul(
            Box::new(LikelihoodExpression::Term(a.clone())),
            Box::new(LikelihoodExpression::Term(b.clone())),
        )
    }
);

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodExpression", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodExpression(LikelihoodExpression);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodExpression {
    fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() + other_expr.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(self.0.clone()))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                other_expr.0.clone() + self.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(self.0.clone()))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() * other_expr.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() * other_expr.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __str__(&self) -> String {
        format!("{}", self.0)
    }
    fn __repr__(&self) -> String {
        format!("{:?}", self.0)
    }
}

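/// Evaluates a [`LikelihoodExpression`] over the combined parameter list of its
/// [`LikelihoodManager`]. Before each term is evaluated, the relevant subset of the
/// combined parameters is scattered into a per-term buffer, and per-term gradients are
/// scattered back into the combined parameter space.
///
/// Illustrative sketch (not a doctest; continues the [`LikelihoodManager`] example):
///
/// ```rust,ignore
/// let p0 = vec![1.0; evaluator.parameters().len()];
/// let status = evaluator.minimize(&p0, None, None)?;
/// ```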
pub struct LikelihoodEvaluator {
    likelihood_manager: LikelihoodManager,
    likelihood_expression: LikelihoodExpression,
}

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for LikelihoodEvaluator {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        thread_pool.install(|| self.evaluate(parameters))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        thread_pool.install(|| self.evaluate_gradient(parameters))
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for LikelihoodEvaluator {
    fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
        self.evaluate(parameters)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        _user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        self.evaluate_gradient(parameters)
    }
}

pub(crate) struct NegativeLikelihoodEvaluator<'a>(&'a LikelihoodEvaluator);

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for NegativeLikelihoodEvaluator<'_> {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for NegativeLikelihoodEvaluator<'_> {
    fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, user_data).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, user_data).map(|res| -res)
    }
}

impl LikelihoodEvaluator {
    pub fn parameters(&self) -> Vec<String> {
        self.likelihood_manager.parameters()
    }
    pub fn evaluate(&self, parameters: &[Float]) -> Result<Float, LadduError> {
        let mut param_buffers: Vec<Vec<Float>> = self
            .likelihood_manager
            .param_counts
            .iter()
            .map(|&count| vec![0.0; count])
            .collect();
        for (layout, buffer) in self
            .likelihood_manager
            .param_layouts
            .iter()
            .zip(param_buffers.iter_mut())
        {
            for (buffer_idx, &param_idx) in layout.iter().enumerate() {
                buffer[buffer_idx] = parameters[param_idx];
            }
        }
        let likelihood_values = LikelihoodValues(
            self.likelihood_manager
                .terms
                .iter()
                .zip(param_buffers.iter())
                .map(|(term, buffer)| term.evaluate(buffer))
                .collect(),
        );
        Ok(self.likelihood_expression.evaluate(&likelihood_values))
    }

    pub fn evaluate_gradient(&self, parameters: &[Float]) -> Result<DVector<Float>, LadduError> {
        let mut param_buffers: Vec<Vec<Float>> = self
            .likelihood_manager
            .param_counts
            .iter()
            .map(|&count| vec![0.0; count])
            .collect();
        for (layout, buffer) in self
            .likelihood_manager
            .param_layouts
            .iter()
            .zip(param_buffers.iter_mut())
        {
            for (buffer_idx, &param_idx) in layout.iter().enumerate() {
                buffer[buffer_idx] = parameters[param_idx];
            }
        }
        let likelihood_values = LikelihoodValues(
            self.likelihood_manager
                .terms
                .iter()
                .zip(param_buffers.iter())
                .map(|(term, buffer)| term.evaluate(buffer))
                .collect(),
        );
        let mut gradient_buffers: Vec<DVector<Float>> = (0..self.likelihood_manager.terms.len())
            .map(|_| DVector::zeros(self.likelihood_manager.param_names.len()))
            .collect();
        for (((term, param_buffer), gradient_buffer), layout) in self
            .likelihood_manager
            .terms
            .iter()
            .zip(param_buffers.iter())
            .zip(gradient_buffers.iter_mut())
            .zip(self.likelihood_manager.param_layouts.iter())
        {
            let term_gradient = term.evaluate_gradient(param_buffer);
            for (term_idx, &buffer_idx) in layout.iter().enumerate() {
                gradient_buffer[buffer_idx] = term_gradient[term_idx];
            }
        }
        let likelihood_gradients = LikelihoodGradients(gradient_buffers);
        Ok(self
            .likelihood_expression
            .evaluate_gradient(&likelihood_values, &likelihood_gradients))
    }

    pub fn minimize(
        &self,
        p0: &[Float],
        bounds: Option<Vec<(Float, Float)>>,
        options: Option<MinimizerOptions>,
    ) -> Result<Status, LadduError> {
        let options = options.unwrap_or_default();
        let mut m = Minimizer::new(options.algorithm, self.parameters().len())
            .with_bounds(bounds)
            .with_max_steps(options.max_steps);
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        #[cfg(feature = "rayon")]
        {
            m.minimize(
                self,
                p0,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.minimize(self, p0, &mut ())?;
        }
        Ok(m.status)
    }

    pub fn mcmc<T: AsRef<[DVector<Float>]>>(
        &self,
        p0: T,
        n_steps: usize,
        options: Option<MCMCOptions>,
        rng: Rng,
    ) -> Result<Ensemble, LadduError> {
        let options = options.unwrap_or(MCMCOptions::new_ess(
            [ESSMove::differential(0.9), ESSMove::gaussian(0.1)],
            rng,
        ));
        let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        let func = NegativeLikelihoodEvaluator(self);
        #[cfg(feature = "rayon")]
        {
            m.sample(
                &func,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                n_steps,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.sample(&func, &mut (), n_steps)?;
        }
        Ok(m.ensemble)
    }
}

#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodEvaluator", module = "laddu")]
pub struct PyLikelihoodEvaluator(LikelihoodEvaluator);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodEvaluator {
    #[getter]
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
        #[cfg(feature = "rayon")]
        {
            Ok(ThreadPoolBuilder::new()
                .num_threads(threads.unwrap_or_else(num_cpus::get))
                .build()
                .map_err(LadduError::from)?
                .install(|| self.0.evaluate(&parameters))?)
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(self.0.evaluate(&parameters)?)
        }
    }
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate_gradient<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| self.0.evaluate_gradient(&parameters))?
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0.evaluate_gradient(&parameters)?.as_slice(),
            ))
        }
    }

    #[pyo3(signature = (p0, *, bounds=None, method="lbfgsb", max_steps=4000, debug=false, verbose=false, **kwargs))]
    #[allow(clippy::too_many_arguments)]
    fn minimize(
        &self,
        p0: Vec<Float>,
        bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
        method: &str,
        max_steps: usize,
        debug: bool,
        verbose: bool,
        kwargs: Option<&Bound<'_, PyDict>>,
    ) -> PyResult<PyStatus> {
        let bounds = bounds.map(|bounds_vec| {
            bounds_vec
                .iter()
                .map(|(opt_lb, opt_ub)| {
                    (
                        opt_lb.unwrap_or(Float::NEG_INFINITY),
                        opt_ub.unwrap_or(Float::INFINITY),
                    )
                })
                .collect()
        });
        let n_parameters = p0.len();
        let options =
            py_parse_minimizer_options(n_parameters, method, max_steps, debug, verbose, kwargs)?;
        let status = self.0.minimize(&p0, bounds, Some(options))?;
        Ok(PyStatus(status))
    }

    #[pyo3(signature = (p0, n_steps, *, method="ESS", debug=false, verbose=false, seed=0, **kwargs))]
    #[allow(clippy::too_many_arguments)]
    fn mcmc(
        &self,
        p0: Vec<Vec<Float>>,
        n_steps: usize,
        method: &str,
        debug: bool,
        verbose: bool,
        seed: u64,
        kwargs: Option<&Bound<'_, PyDict>>,
    ) -> PyResult<PyEnsemble> {
        let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
        let mut rng = Rng::new();
        rng.seed(seed);
        let options = py_parse_mcmc_options(method, debug, verbose, kwargs, rng.clone())?;
        let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
        Ok(PyEnsemble(ensemble))
    }
}

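/// A [`LikelihoodTerm`] which simply returns the value of a single named parameter, useful
/// as a multiplicative scale factor in a [`LikelihoodExpression`].
///
/// Illustrative sketch (not a doctest; `manager` and `lid` are a previously constructed
/// [`LikelihoodManager`] and registered term):
///
/// ```rust,ignore
/// let scale = manager.register(LikelihoodScalar::new("scale"));
/// let expression = scale * lid;
/// ```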
#[derive(Clone)]
pub struct LikelihoodScalar(String);

impl LikelihoodScalar {
    pub fn new<T: AsRef<str>>(name: T) -> Box<Self> {
        Self(name.as_ref().into()).into()
    }
}

impl LikelihoodTerm for LikelihoodScalar {
    fn evaluate(&self, parameters: &[Float]) -> Float {
        parameters[0]
    }

    fn evaluate_gradient(&self, _parameters: &[Float]) -> DVector<Float> {
        DVector::from_vec(vec![1.0])
    }

    fn parameters(&self) -> Vec<String> {
        vec![self.0.clone()]
    }
}

#[cfg(feature = "python")]
#[pyfunction(name = "LikelihoodScalar")]
pub fn py_likelihood_scalar(name: String) -> PyLikelihoodTerm {
    PyLikelihoodTerm(LikelihoodScalar::new(name))
}