laddu_extensions/likelihoods.rs

use std::{
    collections::HashMap,
    fmt::{Debug, Display},
    sync::Arc,
};

use accurate::{sum::Klein, traits::*};
use auto_ops::*;
use dyn_clone::DynClone;
use fastrand::Rng;
use ganesh::{
    traits::AbortSignal, Ensemble, Function, Minimizer, Sampler, Status, Swarm, SwarmMinimizer,
};
use laddu_core::{
    amplitudes::{central_difference, AmplitudeValues, Evaluator, GradientValues, Model},
    data::Dataset,
    resources::Parameters,
    Complex, DVector, Float, LadduError,
};

#[cfg(feature = "mpi")]
use laddu_core::mpi::LadduMPI;

#[cfg(feature = "mpi")]
use mpi::{datatype::PartitionMut, topology::SimpleCommunicator, traits::*};

#[cfg(feature = "python")]
use crate::ganesh_ext::py_ganesh::{
    py_parse_mcmc_options, py_parse_minimizer_options, py_parse_swarm_options, PyEnsemble,
    PyStatus, PySwarm,
};
#[cfg(feature = "python")]
use laddu_python::{
    amplitudes::{PyEvaluator, PyModel},
    data::PyDataset,
};
#[cfg(feature = "python")]
use numpy::PyArray1;
#[cfg(feature = "python")]
use pyo3::{exceptions::PyTypeError, prelude::*, types::PyList};
#[cfg(feature = "rayon")]
use rayon::{prelude::*, ThreadPool, ThreadPoolBuilder};

use crate::ganesh_ext::{MCMCOptions, MinimizerOptions, SwarmOptions};

/// A trait which describes a term that can be used like a likelihood (more correctly, a negative
/// log-likelihood) in a minimization.
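///
/// # Example
///
/// A minimal sketch of a custom term (here a Gaussian prior on a single parameter; the
/// struct and its fields are illustrative and not part of the crate):
///
/// ```ignore
/// #[derive(Clone)]
/// struct GaussianPrior {
///     mu: Float,
///     sigma: Float,
/// }
///
/// impl LikelihoodTerm for GaussianPrior {
///     fn evaluate(&self, parameters: &[Float]) -> Float {
///         // Negative log of a Gaussian density, up to an additive constant.
///         let z = (parameters[0] - self.mu) / self.sigma;
///         0.5 * z * z
///     }
///     fn parameters(&self) -> Vec<String> {
///         vec!["constrained_parameter".to_string()]
///     }
/// }
/// ```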
pub trait LikelihoodTerm: DynClone + Send + Sync {
    /// Evaluate the term given some input parameters.
    fn evaluate(&self, parameters: &[Float]) -> Float;
    /// Evaluate the gradient of the term given some input parameters.
    fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
        central_difference(parameters, |parameters: &[Float]| self.evaluate(parameters))
    }
    /// The list of names of the input parameters for [`LikelihoodTerm::evaluate`].
    fn parameters(&self) -> Vec<String>;
}

dyn_clone::clone_trait_object!(LikelihoodTerm);

/// A term in an expression with multiple likelihood components
///
/// See Also
/// --------
/// NLL.as_term
///
#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodTerm", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodTerm(pub Box<dyn LikelihoodTerm>);

/// An extended, unbinned negative log-likelihood evaluator.
#[derive(Clone)]
pub struct NLL {
    /// The internal [`Evaluator`] for data
    pub data_evaluator: Evaluator,
    /// The internal [`Evaluator`] for accepted Monte Carlo
    pub accmc_evaluator: Evaluator,
}

impl NLL {
    /// Construct an [`NLL`] from a [`Model`] and two [`Dataset`]s (data and Monte Carlo). This is the equivalent of the [`Model::load`] method,
    /// but for two [`Dataset`]s and a different method of evaluation.
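    ///
    /// # Example
    ///
    /// A minimal sketch (assumes `model`, `ds_data`, and `ds_accmc` have already been
    /// constructed; the variable names are illustrative):
    ///
    /// ```ignore
    /// let nll = NLL::new(&model, &ds_data, &ds_accmc);
    /// // `LikelihoodTerm` must be in scope to call `evaluate` directly:
    /// let value = nll.evaluate(&parameters);
    /// ```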
    pub fn new(model: &Model, ds_data: &Arc<Dataset>, ds_accmc: &Arc<Dataset>) -> Box<Self> {
        Self {
            data_evaluator: model.load(ds_data),
            accmc_evaluator: model.load(ds_accmc),
        }
        .into()
    }
    /// Activate an [`Amplitude`](`laddu_core::amplitudes::Amplitude`) by name.
    pub fn activate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.activate(&name)?;
        self.accmc_evaluator.activate(&name)
    }
    /// Activate several [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s by name.
    pub fn activate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.activate_many(names)?;
        self.accmc_evaluator.activate_many(names)
    }
    /// Activate all registered [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s.
    pub fn activate_all(&self) {
        self.data_evaluator.activate_all();
        self.accmc_evaluator.activate_all();
    }
    /// Deactivate an [`Amplitude`](`laddu_core::amplitudes::Amplitude`) by name.
    pub fn deactivate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.deactivate(&name)?;
        self.accmc_evaluator.deactivate(&name)
    }
    /// Deactivate several [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s by name.
    pub fn deactivate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.deactivate_many(names)?;
        self.accmc_evaluator.deactivate_many(names)
    }
    /// Deactivate all registered [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s.
    pub fn deactivate_all(&self) {
        self.data_evaluator.deactivate_all();
        self.accmc_evaluator.deactivate_all();
    }
    /// Isolate an [`Amplitude`](`laddu_core::amplitudes::Amplitude`) by name (deactivate the rest).
    pub fn isolate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.isolate(&name)?;
        self.accmc_evaluator.isolate(&name)
    }
    /// Isolate several [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s by name (deactivate the rest).
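    ///
    /// # Example
    ///
    /// A usage sketch (the amplitude names are illustrative):
    ///
    /// ```ignore
    /// nll.isolate_many(&["f0(980)", "f2(1270)"])?; // keep only these waves active
    /// nll.activate_all();                          // restore the full model afterward
    /// ```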
    pub fn isolate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.isolate_many(names)?;
        self.accmc_evaluator.isolate_many(names)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each
    /// Monte-Carlo event (non-MPI version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project`] instead.
    pub fn project_local(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> Vec<Float> {
        let (mc_dataset, result) = if let Some(mc_evaluator) = mc_evaluator {
            (
                mc_evaluator.dataset.clone(),
                mc_evaluator.evaluate_local(parameters),
            )
        } else {
            (
                self.accmc_evaluator.dataset.clone(),
                self.accmc_evaluator.evaluate_local(parameters),
            )
        };
        let n_mc = self.accmc_evaluator.dataset.n_events();
        #[cfg(feature = "rayon")]
        let output: Vec<Float> = result
            .par_iter()
            .zip(mc_dataset.events.par_iter())
            .map(|(l, e)| e.weight * l.re / n_mc as Float)
            .collect();

        #[cfg(not(feature = "rayon"))]
        let output: Vec<Float> = result
            .iter()
            .zip(mc_dataset.events.iter())
            .map(|(l, e)| e.weight * l.re / n_mc as Float)
            .collect();
        output
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each
    /// Monte-Carlo event (MPI-compatible version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project`] instead.
    #[cfg(feature = "mpi")]
    pub fn project_mpi(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Vec<Float> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let local_projection = self.project_local(parameters, mc_evaluator);
        let mut buffer: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }
        buffer
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each
    /// Monte-Carlo event. This method takes the real part of the given expression (discarding
    /// the imaginary part entirely, which does not matter if expressions are coherent sums
    /// wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    /// Event weights are determined by the following formula:
    ///
    /// ```math
    /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
    /// ```
    ///
    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
    /// regardless of the `mc_evaluator`.
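    ///
    /// # Example
    ///
    /// A sketch of producing fit-result weights for plotting (assumes a fitted parameter
    /// vector `best`; the name is illustrative):
    ///
    /// ```ignore
    /// let weights = nll.project(&best, None); // one weight per accepted MC event
    /// ```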
    pub fn project(&self, parameters: &[Float], mc_evaluator: Option<Evaluator>) -> Vec<Float> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_mpi(parameters, mc_evaluator, &world);
            }
        }
        self.project_local(parameters, mc_evaluator)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event (non-MPI version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_gradient`] instead.
    pub fn project_gradient_local(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        let (mc_dataset, result, result_gradient) = if let Some(mc_evaluator) = mc_evaluator {
            (
                mc_evaluator.dataset.clone(),
                mc_evaluator.evaluate_local(parameters),
                mc_evaluator.evaluate_gradient_local(parameters),
            )
        } else {
            (
                self.accmc_evaluator.dataset.clone(),
                self.accmc_evaluator.evaluate_local(parameters),
                self.accmc_evaluator.evaluate_gradient_local(parameters),
            )
        };
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        {
            (
                result
                    .par_iter()
                    .zip(mc_dataset.events.par_iter())
                    .map(|(l, e)| e.weight * l.re / n_mc)
                    .collect(),
                result_gradient
                    .par_iter()
                    .zip(mc_dataset.events.par_iter())
                    .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                    .collect(),
            )
        }
        #[cfg(not(feature = "rayon"))]
        {
            (
                result
                    .iter()
                    .zip(mc_dataset.events.iter())
                    .map(|(l, e)| e.weight * l.re / n_mc)
                    .collect(),
                result_gradient
                    .iter()
                    .zip(mc_dataset.events.iter())
                    .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                    .collect(),
            )
        }
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event (MPI-compatible version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_gradient`] instead.
    #[cfg(feature = "mpi")]
    pub fn project_gradient_mpi(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let (local_projection, local_gradient_projection) =
            self.project_gradient_local(parameters, mc_evaluator);
        let mut projection_result: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }

        let flattened_local_gradient_projection = local_gradient_projection
            .iter()
            .flat_map(|g| g.data.as_vec().to_vec())
            .collect::<Vec<Float>>();
        let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
        let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
        let mut partitioned_flattened_result_buffer =
            PartitionMut::new(&mut flattened_result_buffer, counts, displs);
        world.all_gather_varcount_into(
            &flattened_local_gradient_projection,
            &mut partitioned_flattened_result_buffer,
        );
        let gradient_projection_result = flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .collect();
        (projection_result, gradient_projection_result)
    }
    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event. This method takes the real part of the given
    /// expression (discarding the imaginary part entirely, which does not matter if expressions
    /// are coherent sums wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    /// Event weights are determined by the following formula:
    ///
    /// ```math
    /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
    /// ```
    ///
    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
    /// regardless of the `mc_evaluator`.
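    ///
    /// # Example
    ///
    /// A sketch (assumes a fitted parameter vector `best`; the name is illustrative):
    ///
    /// ```ignore
    /// let (weights, weight_gradients) = nll.project_gradient(&best, None);
    /// ```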
    pub fn project_gradient(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_gradient_mpi(parameters, mc_evaluator, &world);
            }
        }
        self.project_gradient_local(parameters, mc_evaluator)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation (non-MPI version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_with`] instead.
    pub fn project_with_local<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<Vec<Float>, LadduError> {
        if let Some(mc_evaluator) = &mc_evaluator {
            let current_active_mc = mc_evaluator.resources.read().active.clone();
            mc_evaluator.isolate_many(names)?;
            let mc_dataset = mc_evaluator.dataset.clone();
            let result = mc_evaluator.evaluate_local(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events();
            #[cfg(feature = "rayon")]
            let output: Vec<Float> = result
                .par_iter()
                .zip(mc_dataset.events.par_iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            #[cfg(not(feature = "rayon"))]
            let output: Vec<Float> = result
                .iter()
                .zip(mc_dataset.events.iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            mc_evaluator.resources.write().active = current_active_mc;
            Ok(output)
        } else {
            let current_active_data = self.data_evaluator.resources.read().active.clone();
            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
            self.isolate_many(names)?;
            let mc_dataset = &self.accmc_evaluator.dataset;
            let result = self.accmc_evaluator.evaluate_local(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events();
            #[cfg(feature = "rayon")]
            let output: Vec<Float> = result
                .par_iter()
                .zip(mc_dataset.events.par_iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            #[cfg(not(feature = "rayon"))]
            let output: Vec<Float> = result
                .iter()
                .zip(mc_dataset.events.iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            self.data_evaluator.resources.write().active = current_active_data;
            self.accmc_evaluator.resources.write().active = current_active_accmc;
            Ok(output)
        }
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation (MPI-compatible version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_with`] instead.
    #[cfg(feature = "mpi")]
    pub fn project_with_mpi<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Result<Vec<Float>, LadduError> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let local_projection = self.project_with_local(parameters, names, mc_evaluator)?;
        let mut buffer: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }
        Ok(buffer)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation.
    ///
    /// This method takes the real part of the given expression (discarding
    /// the imaginary part entirely, which does not matter if expressions are coherent sums
    /// wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    /// Event weights are determined by the following formula:
    ///
    /// ```math
    /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
    /// ```
    ///
    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
    /// regardless of the `mc_evaluator`.
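    ///
    /// # Example
    ///
    /// A sketch of projecting a single isolated wave (the amplitude name and `best` are
    /// illustrative):
    ///
    /// ```ignore
    /// let s_wave_weights = nll.project_with(&best, &["S-wave"], None)?;
    /// ```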
    pub fn project_with<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<Vec<Float>, LadduError> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_with_mpi(parameters, names, mc_evaluator, &world);
            }
        }
        self.project_with_local(parameters, names, mc_evaluator)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project_gradient`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation (non-MPI version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_gradient_with`] instead.
    pub fn project_gradient_with_local<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        if let Some(mc_evaluator) = &mc_evaluator {
            let current_active_mc = mc_evaluator.resources.read().active.clone();
            mc_evaluator.isolate_many(names)?;
            let mc_dataset = mc_evaluator.dataset.clone();
            let result = mc_evaluator.evaluate_local(parameters);
            let result_gradient = mc_evaluator.evaluate_gradient(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
            #[cfg(feature = "rayon")]
            let (res, res_gradient) = {
                (
                    result
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            #[cfg(not(feature = "rayon"))]
            let (res, res_gradient) = {
                (
                    result
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            mc_evaluator.resources.write().active = current_active_mc;
            Ok((res, res_gradient))
        } else {
            let current_active_data = self.data_evaluator.resources.read().active.clone();
            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
            self.isolate_many(names)?;
            let mc_dataset = &self.accmc_evaluator.dataset;
            let result = self.accmc_evaluator.evaluate_local(parameters);
            let result_gradient = self.accmc_evaluator.evaluate_gradient(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
            #[cfg(feature = "rayon")]
            let (res, res_gradient) = {
                (
                    result
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            #[cfg(not(feature = "rayon"))]
            let (res, res_gradient) = {
                (
                    result
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            self.data_evaluator.resources.write().active = current_active_data;
            self.accmc_evaluator.resources.write().active = current_active_accmc;
            Ok((res, res_gradient))
        }
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project_gradient`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation (MPI-compatible version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_gradient_with`] instead.
    #[cfg(feature = "mpi")]
    pub fn project_gradient_with_mpi<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let (local_projection, local_gradient_projection) =
            self.project_gradient_with_local(parameters, names, mc_evaluator)?;
        let mut projection_result: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }

        let flattened_local_gradient_projection = local_gradient_projection
            .iter()
            .flat_map(|g| g.data.as_vec().to_vec())
            .collect::<Vec<Float>>();
        let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
        let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
        let mut partitioned_flattened_result_buffer =
            PartitionMut::new(&mut flattened_result_buffer, counts, displs);
        world.all_gather_varcount_into(
            &flattened_local_gradient_projection,
            &mut partitioned_flattened_result_buffer,
        );
        let gradient_projection_result = flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .collect();
        Ok((projection_result, gradient_projection_result))
    }
    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each
    /// Monte-Carlo event. This method differs from the standard [`NLL::project_gradient`] in that it first
    /// isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s by name, but returns
    /// the [`NLL`] to its prior state after calculation.
    ///
    /// This method takes the real part of the given expression (discarding
    /// the imaginary part entirely, which does not matter if expressions are coherent sums
    /// wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    /// Event weights are determined by the following formula:
    ///
    /// ```math
    /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
    /// ```
    ///
    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
    /// regardless of the `mc_evaluator`.
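    ///
    /// # Example
    ///
    /// A sketch (the amplitude name and `best` are illustrative):
    ///
    /// ```ignore
    /// let (weights, weight_gradients) =
    ///     nll.project_gradient_with(&best, &["S-wave"], None)?;
    /// ```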
    pub fn project_gradient_with<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_gradient_with_mpi(parameters, names, mc_evaluator, &world);
            }
        }
        self.project_gradient_with_local(parameters, names, mc_evaluator)
    }

    fn evaluate_local(&self, parameters: &[Float]) -> Float {
        let data_result = self.data_evaluator.evaluate_local(parameters);
        let mc_result = self.accmc_evaluator.evaluate_local(parameters);
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        let data_term: Float = data_result
            .par_iter()
            .zip(self.data_evaluator.dataset.events.par_iter())
            .map(|(l, e)| e.weight * Float::ln(l.re))
            .parallel_sum_with_accumulator::<Klein<Float>>();
        #[cfg(feature = "rayon")]
        let mc_term: Float = mc_result
            .par_iter()
            .zip(self.accmc_evaluator.dataset.events.par_iter())
            .map(|(l, e)| e.weight * l.re)
            .parallel_sum_with_accumulator::<Klein<Float>>();
        #[cfg(not(feature = "rayon"))]
        let data_term: Float = data_result
            .iter()
            .zip(self.data_evaluator.dataset.events.iter())
            .map(|(l, e)| e.weight * Float::ln(l.re))
            .sum_with_accumulator::<Klein<Float>>();
        #[cfg(not(feature = "rayon"))]
        let mc_term: Float = mc_result
            .iter()
            .zip(self.accmc_evaluator.dataset.events.iter())
            .map(|(l, e)| e.weight * l.re)
            .sum_with_accumulator::<Klein<Float>>();
        -2.0 * (data_term - mc_term / n_mc)
    }

    #[cfg(feature = "mpi")]
    fn evaluate_mpi(&self, parameters: &[Float], world: &SimpleCommunicator) -> Float {
        let local_evaluation = self.evaluate_local(parameters);
        let mut buffer: Vec<Float> = vec![0.0; world.size() as usize];
        world.all_gather_into(&local_evaluation, &mut buffer);
        buffer.iter().sum()
    }

    fn evaluate_gradient_local(&self, parameters: &[Float]) -> DVector<Float> {
        let data_resources = self.data_evaluator.resources.read();
        let data_parameters = Parameters::new(parameters, &data_resources.constants);
        let mc_resources = self.accmc_evaluator.resources.read();
        let mc_parameters = Parameters::new(parameters, &mc_resources.constants);
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        let data_term: DVector<Float> = self
            .data_evaluator
            .dataset
            .events
            .par_iter()
            .zip(data_resources.caches.par_iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
                self.data_evaluator
                    .amplitudes
                    .iter()
                    .zip(data_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&data_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.data_evaluator
                            .amplitudes
                            .iter()
                            .zip(data_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&data_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(parameters.len(), gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.data_evaluator.expression.evaluate(&amp_vals),
                    self.data_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
            .collect::<Vec<DVector<Float>>>()
            .iter()
            .sum(); // TODO: replace with custom implementation of accurate crate's trait
        #[cfg(feature = "rayon")]
        let mc_term: DVector<Float> = self
            .accmc_evaluator
            .dataset
            .events
            .par_iter()
            .zip(mc_resources.caches.par_iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
                self.accmc_evaluator
                    .amplitudes
                    .iter()
                    .zip(mc_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&mc_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.accmc_evaluator
                            .amplitudes
                            .iter()
                            .zip(mc_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&mc_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(parameters.len(), gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.accmc_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, g)| w * g.map(|gi| gi.re))
            .collect::<Vec<DVector<Float>>>()
            .iter()
            .sum();
        #[cfg(not(feature = "rayon"))]
        let data_term: DVector<Float> = self
            .data_evaluator
            .dataset
            .events
            .iter()
            .zip(data_resources.caches.iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
                self.data_evaluator
                    .amplitudes
                    .iter()
                    .zip(data_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&data_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.data_evaluator
                            .amplitudes
                            .iter()
                            .zip(data_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&data_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(parameters.len(), gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.data_evaluator.expression.evaluate(&amp_vals),
                    self.data_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
            .sum();
        #[cfg(not(feature = "rayon"))]
        let mc_term: DVector<Float> = self
            .accmc_evaluator
            .dataset
            .events
            .iter()
            .zip(mc_resources.caches.iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
                self.accmc_evaluator
                    .amplitudes
                    .iter()
                    .zip(mc_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&mc_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.accmc_evaluator
                            .amplitudes
                            .iter()
                            .zip(mc_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&mc_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(parameters.len(), gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.accmc_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, g)| w * g.map(|gi| gi.re))
            .sum();
        -2.0 * (data_term - mc_term / n_mc)
    }

    #[cfg(feature = "mpi")]
    fn evaluate_gradient_mpi(
        &self,
        parameters: &[Float],
        world: &SimpleCommunicator,
    ) -> DVector<Float> {
        let local_evaluation_vec = self
            .evaluate_gradient_local(parameters)
            .data
            .as_vec()
            .to_vec();
        let mut flattened_result_buffer = vec![0.0; world.size() as usize * parameters.len()];
        world.all_gather_into(&local_evaluation_vec, &mut flattened_result_buffer);
        flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .sum::<DVector<Float>>()
    }
}

impl LikelihoodTerm for NLL {
    /// Get the list of parameter names in the order they appear in the [`NLL::evaluate`]
    /// method.
    fn parameters(&self) -> Vec<String> {
        self.data_evaluator
            .resources
            .read()
            .parameters
            .iter()
            .cloned()
            .collect()
    }

    /// Evaluate the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters. This method takes the
    /// real part of the given expression (discarding the imaginary part entirely, which
    /// does not matter if expressions are coherent sums wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)). The
    /// result is given by the following formula:
    ///
    /// ```math
    /// NLL(\vec{p}) = -2 \left(\sum_{e \in \text{Data}} \text{weight}(e) \ln(\mathcal{L}(e)) - \frac{1}{N_{\text{MC}_A}} \sum_{e \in \text{MC}_A} \text{weight}(e) \mathcal{L}(e) \right)
    /// ```
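    ///
    /// # Example
    ///
    /// A sketch (assumes `LikelihoodTerm` is in scope and `parameters` matches
    /// [`NLL::parameters`] in length and order):
    ///
    /// ```ignore
    /// let value = LikelihoodTerm::evaluate(nll.as_ref(), &parameters);
    /// ```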
    fn evaluate(&self, parameters: &[Float]) -> Float {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.evaluate_mpi(parameters, &world);
            }
        }
        self.evaluate_local(parameters)
    }

    /// Evaluate the gradient of the stored [`Model`] over the events in the [`Dataset`]
    /// stored by the [`Evaluator`] with the given values for free parameters. This method takes the
    /// real part of the given expression (discarding the imaginary part entirely, which
    /// does not matter if expressions are coherent sums wrapped in
    /// [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.evaluate_gradient_mpi(parameters, &world);
            }
        }
        self.evaluate_gradient_local(parameters)
    }
}

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for NLL {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Ok(thread_pool.install(|| LikelihoodTerm::evaluate(self, parameters)))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Ok(thread_pool.install(|| LikelihoodTerm::evaluate_gradient(self, parameters)))
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for NLL {
    fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
        Ok(LikelihoodTerm::evaluate(self, parameters))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        _user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Ok(LikelihoodTerm::evaluate_gradient(self, parameters))
    }
}

pub(crate) struct LogLikelihood<'a>(&'a NLL);

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for LogLikelihood<'_> {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for LogLikelihood<'_> {
    fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, user_data).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, user_data).map(|res| -res)
    }
}

impl NLL {
    /// Minimizes the negative log-likelihood using the L-BFGS-B algorithm (by default), a limited-memory
    /// quasi-Newton minimizer which supports bounded optimization.
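    ///
    /// # Example
    ///
    /// A minimal sketch (the starting point is illustrative):
    ///
    /// ```ignore
    /// let p0 = vec![0.0; nll.parameters().len()];
    /// let status = nll.minimize(&p0, None, None)?;
    /// println!("{:?}", status);
    /// ```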
    pub fn minimize(
        &self,
        p0: &[Float],
        bounds: Option<Vec<(Float, Float)>>,
        options: Option<MinimizerOptions>,
    ) -> Result<Status, LadduError> {
        let options = options.unwrap_or_default();
        let mut m = Minimizer::new(options.algorithm, self.parameters().len())
            .with_max_steps(options.max_steps);
        if let Some(bounds) = bounds {
            m = m.with_bounds(bounds);
        }
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        #[cfg(feature = "rayon")]
        {
            m.minimize(
                self,
                p0,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.minimize(
                self,
                p0,
                &mut (),
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        Ok(m.status)
    }
    /// Perform Markov Chain Monte Carlo sampling on this [`NLL`]. By default, this uses the [`ESS`](`ganesh::algorithms::mcmc::ESS`) sampler.
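    ///
    /// # Example
    ///
    /// A sketch with a few walkers started near a previous fit result (`best` is
    /// illustrative):
    ///
    /// ```ignore
    /// let walkers: Vec<DVector<Float>> = (0..10)
    ///     .map(|_| DVector::from_vec(best.clone()))
    ///     .collect();
    /// let ensemble = nll.mcmc(&walkers, 1000, None, Rng::new())?;
    /// ```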
    pub fn mcmc<T: AsRef<[DVector<Float>]>>(
        &self,
        p0: T,
        n_steps: usize,
        options: Option<MCMCOptions>,
        rng: Rng,
    ) -> Result<Ensemble, LadduError> {
        let options = options.unwrap_or(MCMCOptions::default_with_rng(rng));
        let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        let func = LogLikelihood(self);
        #[cfg(feature = "rayon")]
        {
            m.sample(
                &func,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                n_steps,
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.sample(
                &func,
                &mut (),
                n_steps,
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        Ok(m.ensemble)
    }
    /// Perform Particle Swarm Optimization on this [`NLL`].
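    ///
    /// # Example
    ///
    /// A sketch (assumes a `swarm` has been constructed via the `ganesh` API):
    ///
    /// ```ignore
    /// let result_swarm = nll.swarm(swarm, None, Rng::new())?;
    /// ```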
1127    pub fn swarm(
1128        &self,
1129        swarm: Swarm,
1130        options: Option<SwarmOptions>,
1131        rng: Rng,
1132    ) -> Result<Swarm, LadduError> {
1133        let options = options.unwrap_or(SwarmOptions::default_with_rng(rng));
1134        let mut m = SwarmMinimizer::new(options.algorithm, swarm);
1135        for observer in options.observers {
1136            m = m.with_observer(observer);
1137        }
1138        #[cfg(feature = "rayon")]
1139        {
1140            m.minimize(
1141                self,
1142                &mut ThreadPoolBuilder::new()
1143                    .num_threads(options.threads)
1144                    .build()
1145                    .map_err(LadduError::from)?,
1146                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
1147            )?;
1148        }
1149        #[cfg(not(feature = "rayon"))]
1150        {
1151            m.minimize(
1152                self,
1153                &mut (),
1154                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
1155            )?;
1156        }
1157        Ok(m.swarm)
1158    }
1159}
1160
1161/// A (extended) negative log-likelihood evaluator
1162///
1163/// Parameters
1164/// ----------
1165/// model: Model
1166///     The Model to evaluate
1167/// ds_data : Dataset
1168///     A Dataset representing true signal data
1169/// ds_accmc : Dataset
1170///     A Dataset of physically flat accepted Monte Carlo data used for normalization
1171///
1172#[cfg(feature = "python")]
1173#[pyclass(name = "NLL", module = "laddu")]
1174#[derive(Clone)]
1175pub struct PyNLL(pub Box<NLL>);
1176
1177#[cfg(feature = "python")]
1178#[pymethods]
1179impl PyNLL {
1180    #[new]
1181    #[pyo3(signature = (model, ds_data, ds_accmc))]
1182    fn new(model: &PyModel, ds_data: &PyDataset, ds_accmc: &PyDataset) -> Self {
1183        Self(NLL::new(&model.0, &ds_data.0, &ds_accmc.0))
1184    }
1185    /// The underlying signal dataset used in calculating the NLL
1186    ///
1187    /// Returns
1188    /// -------
1189    /// Dataset
1190    ///
1191    #[getter]
1192    fn data(&self) -> PyDataset {
1193        PyDataset(self.0.data_evaluator.dataset.clone())
1194    }
1195    /// The underlying accepted Monte Carlo dataset used in calculating the NLL
1196    ///
1197    /// Returns
1198    /// -------
1199    /// Dataset
1200    ///
1201    #[getter]
1202    fn accmc(&self) -> PyDataset {
1203        PyDataset(self.0.accmc_evaluator.dataset.clone())
1204    }
1205    /// Turn an ``NLL`` into a term that can be used by a ``LikelihoodManager``
1206    ///
    /// Returns
    /// -------
    /// term : LikelihoodTerm
    ///     The isolated NLL which can be used to build more complex models
    ///
    fn as_term(&self) -> PyLikelihoodTerm {
        PyLikelihoodTerm(self.0.clone())
    }
    /// The names of the free parameters used to evaluate the NLL
    ///
    /// Returns
    /// -------
    /// parameters : list of str
    ///
    #[getter]
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    /// Activates Amplitudes in the NLL by name
    ///
    /// Parameters
    /// ----------
    /// arg : str or list of str
    ///     Names of Amplitudes to be activated
    ///
    /// Raises
    /// ------
    /// TypeError
    ///     If `arg` is not a str or list of str
    /// ValueError
    ///     If `arg` or any items of `arg` are not registered Amplitudes
    ///
    fn activate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.activate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.activate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    /// Activates all Amplitudes in the NLL
    ///
    fn activate_all(&self) {
        self.0.activate_all();
    }
    /// Deactivates Amplitudes in the NLL by name
    ///
    /// Deactivated Amplitudes act as zeros in the NLL
    ///
    /// Parameters
    /// ----------
    /// arg : str or list of str
    ///     Names of Amplitudes to be deactivated
    ///
    /// Raises
    /// ------
    /// TypeError
    ///     If `arg` is not a str or list of str
    /// ValueError
    ///     If `arg` or any items of `arg` are not registered Amplitudes
    ///
    fn deactivate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.deactivate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.deactivate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    /// Deactivates all Amplitudes in the NLL
    ///
    fn deactivate_all(&self) {
        self.0.deactivate_all();
    }
    /// Isolates Amplitudes in the NLL by name
    ///
    /// Activates the Amplitudes given in `arg` and deactivates the rest
    ///
    /// Parameters
    /// ----------
    /// arg : str or list of str
    ///     Names of Amplitudes to be isolated
    ///
    /// Raises
    /// ------
    /// TypeError
    ///     If `arg` is not a str or list of str
    /// ValueError
    ///     If `arg` or any items of `arg` are not registered Amplitudes
    ///
    fn isolate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.isolate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.isolate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    /// Evaluate the extended negative log-likelihood over the stored Datasets
    ///
    /// This is defined as
    ///
    /// .. math:: NLL(\vec{p}; D, MC) = -2 \left( \sum_{e \in D} (e_w \log(\mathcal{L}(e))) - \frac{1}{N_{MC}} \sum_{e \in MC} (e_w \mathcal{L}(e)) \right)
    ///
    /// Parameters
    /// ----------
    /// parameters : list of float
    ///     The values to use for the free parameters
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// result : float
    ///     The total negative log-likelihood
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool
    ///
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
        #[cfg(feature = "rayon")]
        {
            Ok(ThreadPoolBuilder::new()
                .num_threads(threads.unwrap_or_else(num_cpus::get))
                .build()
                .map_err(LadduError::from)?
                .install(|| LikelihoodTerm::evaluate(self.0.as_ref(), &parameters)))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(LikelihoodTerm::evaluate(self.0.as_ref(), &parameters))
        }
    }
    /// Evaluate the gradient of the negative log-likelihood over the stored Dataset
    ///
    /// Parameters
    /// ----------
    /// parameters : list of float
    ///     The values to use for the free parameters
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// result : array_like
    ///     A ``numpy`` array representing the gradient of the negative log-likelihood over each parameter
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool or problem creating the resulting
    ///     ``numpy`` array
    ///
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate_gradient<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| self.0.evaluate_gradient(&parameters))
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0.evaluate_gradient(&parameters).as_slice(),
            ))
        }
    }
    /// Project the model over the Monte Carlo dataset with the given parameter values
    ///
    /// This is defined as
    ///
    /// .. math:: e_w(\vec{p}) = \frac{e_w}{N_{MC}} \mathcal{L}(e)
    ///
    /// Parameters
    /// ----------
    /// parameters : list of float
    ///     The values to use for the free parameters
    /// mc_evaluator : Evaluator, optional
    ///     Project using the given Evaluator or use the stored ``accmc`` if None
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// result : array_like
    ///     Weights for every Monte Carlo event which represent the fit to data
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool or problem creating the resulting
    ///     ``numpy`` array
    ///
    #[pyo3(signature = (parameters, *, mc_evaluator = None, threads=None))]
    fn project<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        mc_evaluator: Option<PyEvaluator>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| {
                        self.0
                            .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
                    })
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0
                    .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
                    .as_slice(),
            ))
        }
    }

    /// Project the model over the Monte Carlo dataset with the given parameter values, first
    /// isolating the given terms by name. The NLL is then reset to its previous state of
    /// activation.
    ///
    /// This is defined as
    ///
    /// .. math:: e_w(\vec{p}) = \frac{e_w}{N_{MC}} \mathcal{L}(e)
    ///
    /// Parameters
    /// ----------
    /// parameters : list of float
    ///     The values to use for the free parameters
    /// arg : str or list of str
    ///     Names of Amplitudes to be isolated
    /// mc_evaluator : Evaluator, optional
    ///     Project using the given Evaluator or use the stored ``accmc`` if None
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// result : array_like
    ///     Weights for every Monte Carlo event which represent the fit to data
    ///
    /// Raises
    /// ------
    /// TypeError
    ///     If `arg` is not a str or list of str
    /// ValueError
    ///     If `arg` or any items of `arg` are not registered Amplitudes
    /// Exception
    ///     If there was an error building the thread pool or problem creating the resulting
    ///     ``numpy`` array
    ///
    #[pyo3(signature = (parameters, arg, *, mc_evaluator = None, threads=None))]
    fn project_with<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        arg: &Bound<'_, PyAny>,
        mc_evaluator: Option<PyEvaluator>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        let names = if let Ok(string_arg) = arg.extract::<String>() {
            vec![string_arg]
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            vec
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        };
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| {
                        self.0.project_with(
                            &parameters,
                            &names,
                            mc_evaluator.map(|pyeval| pyeval.0.clone()),
                        )
                    })?
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0
                    .project_with(
                        &parameters,
                        &names,
                        mc_evaluator.map(|pyeval| pyeval.0.clone()),
                    )?
                    .as_slice(),
            ))
        }
    }

    /// Minimize the NLL with respect to the free parameters in the model
    ///
    /// This method "runs the fit". Given an initial position `p0` and optional `bounds`, this
    /// method performs a minimization over the negative log-likelihood, optimizing the model
    /// over the stored signal data and Monte Carlo.
    ///
    /// Parameters
    /// ----------
    /// p0 : array_like
    ///     The initial parameters at the start of optimization
    /// bounds : list of tuple of float, optional
    ///     A list of lower and upper bound pairs for each parameter (use ``None`` for no bound)
    /// method : {LBFGSB, NelderMead}, optional
    ///     The minimization algorithm to use (defaults to LBFGSB)
    /// observers : Observer or list of Observers, optional
    ///     Callback functions which are applied after every algorithm step
    /// max_steps : int, default=4000
    ///     The maximum number of algorithm steps to perform
    /// debug : bool, default=False
    ///     Set to ``True`` to print out debugging information at each step
    /// verbose : bool, default=False
    ///     Set to ``True`` to print verbose information at each step
    /// show_step : bool, default=True
    ///     Include step number in verbose output
    /// show_x : bool, default=True
    ///     Include current best position in verbose output
    /// show_fx : bool, default=True
    ///     Include current best NLL in verbose output
    /// skip_hessian : bool, default=False
    ///     Skip calculation of the Hessian matrix (and parameter errors) at the termination of the
    ///     algorithm (use this when uncertainties are not needed/important)
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// Status
    ///     The status of the minimization algorithm at termination
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool
    ///
    #[pyo3(signature = (p0, *, bounds=None, method=None, observers=None, max_steps=4000, debug=false, verbose=false, show_step=true, show_x=true, show_fx=true, skip_hessian=false, threads=None))]
    #[allow(clippy::too_many_arguments)]
    fn minimize(
        &self,
        p0: Vec<Float>,
        bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
        method: Option<Bound<'_, PyAny>>,
        observers: Option<Bound<'_, PyAny>>,
        max_steps: usize,
        debug: bool,
        verbose: bool,
        show_step: bool,
        show_x: bool,
        show_fx: bool,
        skip_hessian: bool,
        threads: Option<usize>,
    ) -> PyResult<PyStatus> {
        let bounds = bounds.map(|bounds_vec| {
            bounds_vec
                .iter()
                .map(|(opt_lb, opt_ub)| {
                    (
                        opt_lb.unwrap_or(Float::NEG_INFINITY),
                        opt_ub.unwrap_or(Float::INFINITY),
                    )
                })
                .collect()
        });
        let options = py_parse_minimizer_options(
            method,
            observers,
            max_steps,
            debug,
            verbose,
            show_step,
            show_x,
            show_fx,
            skip_hessian,
            threads,
        )?;
        let status = self.0.minimize(&p0, bounds, Some(options))?;
        Ok(PyStatus(status))
    }
    /// Run an MCMC algorithm on the free parameters of the NLL's model
    ///
    /// This method can be used to sample the underlying log-likelihood given an initial
    /// position for each walker `p0`.
    ///
    /// Parameters
    /// ----------
    /// p0 : array_like
    ///     The initial parameters at the start of optimization
    /// n_steps : int
    ///     The number of MCMC steps each walker should take
    /// method : {ESS, AIES}, optional
    ///     The MCMC algorithm to use (defaults to ESS)
    /// observers : MCMCObserver or list of MCMCObservers, optional
    ///     Callback functions which are applied after every sampling step
    /// debug : bool, default=False
    ///     Set to ``True`` to print out debugging information at each step
    /// verbose : bool, default=False
    ///     Set to ``True`` to print verbose information at each step
    /// seed : int
    ///     The seed for the random number generator
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// Ensemble
    ///     The resulting ensemble of walkers
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool
    ///
    /// Notes
    /// -----
    /// The default algorithm is the ESS algorithm with a moveset of 90% differential moves and
    /// 10% Gaussian moves.
    ///
    /// Since MCMC methods are inclined to sample maxima rather than minima, the underlying
    /// function sign is automatically flipped when calling this method.
    ///
    #[pyo3(signature = (p0, n_steps, *, method=None, observers=None, debug=false, verbose=false, seed=0, threads=None))]
    #[allow(clippy::too_many_arguments)]
    fn mcmc(
        &self,
        p0: Vec<Vec<Float>>,
        n_steps: usize,
        method: Option<Bound<'_, PyAny>>,
        observers: Option<Bound<'_, PyAny>>,
        debug: bool,
        verbose: bool,
        seed: u64,
        threads: Option<usize>,
    ) -> PyResult<PyEnsemble> {
        let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
        let mut rng = Rng::new();
        rng.seed(seed);
        let options =
            py_parse_mcmc_options(method, observers, debug, verbose, threads, rng.clone())?;
        let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
        Ok(PyEnsemble(ensemble))
    }

    /// Run a Particle Swarm Optimization algorithm on the free parameters of the NLL's model
    ///
    /// This method can be used to minimize the underlying negative log-likelihood given
    /// an initial swarm position.
    ///
    /// Parameters
    /// ----------
    /// swarm : Swarm
    ///     The initial position of the swarm
    /// method : {PSO}, optional
    ///     The swarm algorithm to use (defaults to PSO)
    /// observers : SwarmObserver or list of SwarmObservers, optional
    ///     Callback functions which are applied after every swarm step
    /// debug : bool, default=False
    ///     Set to ``True`` to print out debugging information at each step
    /// verbose : bool, default=False
    ///     Set to ``True`` to print verbose information at each step
    /// seed : int
    ///     The seed for the random number generator
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// Swarm
    ///     The swarm at its final position
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool
    ///
    #[pyo3(signature = (swarm, *, method=None, observers=None, debug=false, verbose=false, seed=0, threads=None))]
    #[allow(clippy::too_many_arguments)]
    fn swarm(
        &self,
        swarm: PySwarm,
        method: Option<Bound<'_, PyAny>>,
        observers: Option<Bound<'_, PyAny>>,
        debug: bool,
        verbose: bool,
        seed: u64,
        threads: Option<usize>,
    ) -> PyResult<PySwarm> {
        let mut rng = Rng::new();
        rng.seed(seed);
        let options =
            py_parse_swarm_options(method, observers, debug, verbose, threads, rng.clone())?;
        let swarm = self.0.swarm(swarm.0, Some(options), rng)?;
        Ok(PySwarm(swarm))
    }
}

/// An identifier that can be used like an [`AmplitudeID`](`laddu_core::amplitudes::AmplitudeID`) to combine registered
/// [`LikelihoodTerm`]s.
#[derive(Clone, Debug)]
pub struct LikelihoodID(usize, Option<String>);

impl Display for LikelihoodID {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if let Some(name) = &self.1 {
            write!(f, "{}({})", name, self.0)
        } else {
            write!(f, "({})", self.0)
        }
    }
}

/// An object which holds a registered ``LikelihoodTerm``
///
/// See Also
/// --------
/// laddu.LikelihoodManager.register
///
#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodID", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodID(LikelihoodID);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodID {
    fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() + other_expr.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                    self.0.clone(),
                )))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                other_expr.0.clone() + self.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                    self.0.clone(),
                )))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() * other_expr.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(other_aid.0.clone() * self.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                other_expr.0.clone() * self.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __str__(&self) -> String {
        format!("{}", self.0)
    }
    fn __repr__(&self) -> String {
        format!("{:?}", self.0)
    }
}

/// A [`Manager`](`laddu_core::Manager`) but for [`LikelihoodTerm`]s.
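///
/// # Example
///
/// A minimal sketch of the intended workflow, assuming `nll_a` and `nll_b` are
/// already-constructed boxed [`LikelihoodTerm`]s (e.g. [`NLL`]s built from real models
/// and datasets) and `parameters` is ordered as in [`LikelihoodManager::parameters`]:
///
/// ```ignore
/// let mut manager = LikelihoodManager::default();
/// let a = manager.register(nll_a);
/// let b = manager.register(nll_b);
/// let expression = &a + &b; // joint NLL: the sum of both terms
/// let evaluator = manager.load(&expression);
/// let value = evaluator.evaluate(&parameters)?;
/// ```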
#[derive(Default, Clone)]
pub struct LikelihoodManager {
    terms: Vec<Box<dyn LikelihoodTerm>>,
    param_name_to_index: HashMap<String, usize>,
    param_names: Vec<String>,
    param_layouts: Vec<Vec<usize>>,
    param_counts: Vec<usize>,
}

impl LikelihoodManager {
    /// Register a [`LikelihoodTerm`] to get a [`LikelihoodID`] which can be combined with others
    /// to form [`LikelihoodExpression`]s which can be minimized.
    pub fn register(&mut self, term: Box<dyn LikelihoodTerm>) -> LikelihoodID {
        let term_idx = self.terms.len();
        for param_name in term.parameters() {
            if !self.param_name_to_index.contains_key(&param_name) {
                self.param_name_to_index
                    .insert(param_name.clone(), self.param_name_to_index.len());
                self.param_names.push(param_name.clone());
            }
        }
        let param_layout: Vec<usize> = term
            .parameters()
            .iter()
            .map(|name| self.param_name_to_index[name])
            .collect();
        let param_count = term.parameters().len();
        self.param_layouts.push(param_layout);
        self.param_counts.push(param_count);
        self.terms.push(term.clone());

        LikelihoodID(term_idx, None)
    }

    /// Register (and name) a [`LikelihoodTerm`] to get a [`LikelihoodID`] which can be combined with others
    /// to form [`LikelihoodExpression`]s which can be minimized.
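    ///
    /// The name appears in the [`Display`] output of the returned [`LikelihoodID`]; a small
    /// sketch (assuming `manager` is a [`LikelihoodManager`] and `nll` is a boxed term):
    ///
    /// ```ignore
    /// let lid = manager.register_with_name(nll, "signal");
    /// assert_eq!(lid.to_string(), "signal(0)"); // the first registered term has index 0
    /// ```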
    pub fn register_with_name<T: AsRef<str>>(
        &mut self,
        term: Box<dyn LikelihoodTerm>,
        name: T,
    ) -> LikelihoodID {
        let term_idx = self.terms.len();
        for param_name in term.parameters() {
            if !self.param_name_to_index.contains_key(&param_name) {
                self.param_name_to_index
                    .insert(param_name.clone(), self.param_name_to_index.len());
                self.param_names.push(param_name.clone());
            }
        }
        let param_layout: Vec<usize> = term
            .parameters()
            .iter()
            .map(|name| self.param_name_to_index[name])
            .collect();
        let param_count = term.parameters().len();
        self.param_layouts.push(param_layout);
        self.param_counts.push(param_count);
        self.terms.push(term.clone());

        LikelihoodID(term_idx, Some(name.as_ref().to_string()))
    }

    /// Return all of the parameter names of registered [`LikelihoodTerm`]s in order. This only
    /// returns the unique names in the order they should be input when evaluated with a
    /// [`LikelihoodEvaluator`].
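    ///
    /// For example, if one term takes parameters `["x", "y"]` and another takes
    /// `["y", "z"]`, this returns `["x", "y", "z"]`, and the shared `"y"` is fed to both
    /// terms from a single slot.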
    pub fn parameters(&self) -> Vec<String> {
        self.param_names.clone()
    }

    /// Load a [`LikelihoodExpression`] to generate a [`LikelihoodEvaluator`] that can be
    /// minimized.
    pub fn load(&self, likelihood_expression: &LikelihoodExpression) -> LikelihoodEvaluator {
        LikelihoodEvaluator {
            likelihood_manager: self.clone(),
            likelihood_expression: likelihood_expression.clone(),
        }
    }
}

/// A class which can be used to register LikelihoodTerms and store precalculated data
///
#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodManager", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodManager(LikelihoodManager);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodManager {
    #[new]
    fn new() -> Self {
        Self(LikelihoodManager::default())
    }
    /// Register a LikelihoodTerm with the LikelihoodManager
    ///
    /// Parameters
    /// ----------
    /// likelihood_term : LikelihoodTerm
    ///     The LikelihoodTerm to register
    /// name : str, optional
    ///     An optional name for the registered term
    ///
    /// Returns
    /// -------
    /// LikelihoodID
    ///     A reference to the registered ``likelihood`` that can be used to form complex
    ///     LikelihoodExpressions
    ///
    #[pyo3(signature = (likelihood_term, *, name = None))]
    fn register(
        &mut self,
        likelihood_term: &PyLikelihoodTerm,
        name: Option<String>,
    ) -> PyLikelihoodID {
        if let Some(name) = name {
            PyLikelihoodID(self.0.register_with_name(likelihood_term.0.clone(), name))
        } else {
            PyLikelihoodID(self.0.register(likelihood_term.0.clone()))
        }
    }
    /// The free parameters used by all terms in the LikelihoodManager
    ///
    /// Returns
    /// -------
    /// parameters : list of str
    ///     The list of parameter names
    ///
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    /// Load a LikelihoodExpression by precalculating each term over their internal Datasets
    ///
    /// Parameters
    /// ----------
    /// likelihood_expression : LikelihoodExpression
    ///     The expression to use in precalculation
    ///
    /// Returns
    /// -------
    /// LikelihoodEvaluator
    ///     An object that can be used to evaluate the `likelihood_expression` over all managed
    ///     terms
    ///
    /// Notes
    /// -----
    /// While the given `likelihood_expression` will be the one evaluated in the end, all registered
    /// LikelihoodTerms will be loaded, and all of their parameters will be included in the final
    /// expression. These parameters will have no effect on evaluation, but they must be
    /// included in function calls.
    ///
    /// See Also
    /// --------
    /// LikelihoodManager.parameters
    ///
    fn load(&self, likelihood_expression: &PyLikelihoodExpression) -> PyLikelihoodEvaluator {
        PyLikelihoodEvaluator(self.0.load(&likelihood_expression.0))
    }
}

#[derive(Debug)]
struct LikelihoodValues(Vec<Float>);

#[derive(Debug)]
struct LikelihoodGradients(Vec<DVector<Float>>);

/// A combination of [`LikelihoodTerm`]s as well as sums and products of them.
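///
/// # Example
///
/// Expressions are composed with the overloaded `+` and `*` operators defined below; a
/// sketch, assuming `a` and `b` are [`LikelihoodID`]s returned by a [`LikelihoodManager`]:
///
/// ```ignore
/// let sum = &a + &b;     // LikelihoodExpression::Add
/// let product = &a * &b; // LikelihoodExpression::Mul
/// let nested = &sum + &product;
/// ```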
#[derive(Clone, Default)]
pub enum LikelihoodExpression {
    /// An expression equal to zero.
    #[default]
    Zero,
    /// An expression equal to one.
    One,
    /// A registered [`LikelihoodTerm`] referenced by a [`LikelihoodID`].
    Term(LikelihoodID),
    /// The sum of two [`LikelihoodExpression`]s.
    Add(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
    /// The product of two [`LikelihoodExpression`]s.
    Mul(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
}

impl Debug for LikelihoodExpression {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.write_tree(f, "", "", "")
    }
}

impl Display for LikelihoodExpression {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl LikelihoodExpression {
    fn evaluate(&self, likelihood_values: &LikelihoodValues) -> Float {
        match self {
            LikelihoodExpression::Zero => 0.0,
            LikelihoodExpression::One => 1.0,
            LikelihoodExpression::Term(lid) => likelihood_values.0[lid.0],
            LikelihoodExpression::Add(a, b) => {
                a.evaluate(likelihood_values) + b.evaluate(likelihood_values)
            }
            LikelihoodExpression::Mul(a, b) => {
                a.evaluate(likelihood_values) * b.evaluate(likelihood_values)
            }
        }
    }
    fn evaluate_gradient(
        &self,
        likelihood_values: &LikelihoodValues,
        likelihood_gradients: &LikelihoodGradients,
    ) -> DVector<Float> {
        match self {
            LikelihoodExpression::Zero => DVector::zeros(0),
            LikelihoodExpression::One => DVector::zeros(0),
            LikelihoodExpression::Term(lid) => likelihood_gradients.0[lid.0].clone(),
            LikelihoodExpression::Add(a, b) => {
                a.evaluate_gradient(likelihood_values, likelihood_gradients)
                    + b.evaluate_gradient(likelihood_values, likelihood_gradients)
            }
            LikelihoodExpression::Mul(a, b) => {
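                // Product rule: grad(a * b) = grad(a) * b + grad(b) * a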
                a.evaluate_gradient(likelihood_values, likelihood_gradients)
                    * b.evaluate(likelihood_values)
                    + b.evaluate_gradient(likelihood_values, likelihood_gradients)
                        * a.evaluate(likelihood_values)
            }
        }
    }
    /// Credit to Daniel Janus: <https://blog.danieljanus.pl/2023/07/20/iterating-trees/>
    fn write_tree(
        &self,
        f: &mut std::fmt::Formatter<'_>,
        parent_prefix: &str,
        immediate_prefix: &str,
        parent_suffix: &str,
    ) -> std::fmt::Result {
        let display_string = match self {
            Self::Zero => "0".to_string(),
            Self::One => "1".to_string(),
            Self::Term(lid) => {
                format!("{}", lid.0)
            }
            Self::Add(_, _) => "+".to_string(),
            Self::Mul(_, _) => "*".to_string(),
        };
        writeln!(f, "{}{}{}", parent_prefix, immediate_prefix, display_string)?;
        match self {
            Self::Term(_) | Self::Zero | Self::One => {}
            Self::Add(a, b) | Self::Mul(a, b) => {
                let terms = [a, b];
                let mut it = terms.iter().peekable();
                let child_prefix = format!("{}{}", parent_prefix, parent_suffix);
                while let Some(child) = it.next() {
                    match it.peek() {
                        Some(_) => child.write_tree(f, &child_prefix, "├─ ", "│  "),
                        None => child.write_tree(f, &child_prefix, "└─ ", "   "),
                    }?;
                }
            }
        }
        Ok(())
    }
}

impl_op_ex!(+ |a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(a.clone()), Box::new(b.clone()))});
impl_op_ex!(
    *|a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression {
        LikelihoodExpression::Mul(Box::new(a.clone()), Box::new(b.clone()))
    }
);
impl_op_ex_commutative!(+ |a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(b.clone()))});
impl_op_ex_commutative!(
    *|a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression {
        LikelihoodExpression::Mul(
            Box::new(LikelihoodExpression::Term(a.clone())),
            Box::new(b.clone()),
        )
    }
);
impl_op_ex!(+ |a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(LikelihoodExpression::Term(b.clone())))});
impl_op_ex!(
    *|a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression {
        LikelihoodExpression::Mul(
            Box::new(LikelihoodExpression::Term(a.clone())),
            Box::new(LikelihoodExpression::Term(b.clone())),
        )
    }
);

/// A mathematical expression formed from LikelihoodIDs
///
#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodExpression", module = "laddu")]
#[derive(Clone)]
pub struct PyLikelihoodExpression(LikelihoodExpression);

/// A convenience function to sum sequences of LikelihoodExpressions
///
#[cfg(feature = "python")]
#[pyfunction(name = "likelihood_sum")]
pub fn py_likelihood_sum(terms: Vec<Bound<'_, PyAny>>) -> PyResult<PyLikelihoodExpression> {
    if terms.is_empty() {
        return Ok(PyLikelihoodExpression(LikelihoodExpression::Zero));
    }
    if terms.len() == 1 {
        let term = &terms[0];
        if let Ok(expression) = term.extract::<PyLikelihoodExpression>() {
            return Ok(expression);
        }
        if let Ok(py_likelihood_id) = term.extract::<PyLikelihoodID>() {
            return Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                py_likelihood_id.0,
            )));
        }
        return Err(PyTypeError::new_err(
            "Item is neither a PyLikelihoodExpression nor a PyLikelihoodID",
        ));
    }
    let mut iter = terms.iter();
    let Some(first_term) = iter.next() else {
        return Ok(PyLikelihoodExpression(LikelihoodExpression::Zero));
    };
    if let Ok(first_expression) = first_term.extract::<PyLikelihoodExpression>() {
        let mut summation = first_expression.clone();
        for term in iter {
            summation = summation.__add__(term)?;
        }
        return Ok(summation);
    }
    if let Ok(first_likelihood_id) = first_term.extract::<PyLikelihoodID>() {
        let mut summation =
            PyLikelihoodExpression(LikelihoodExpression::Term(first_likelihood_id.0));
        for term in iter {
            summation = summation.__add__(term)?;
        }
        return Ok(summation);
    }
    Err(PyTypeError::new_err(
        "Elements must be PyLikelihoodExpression or PyLikelihoodID",
    ))
}

/// A convenience function to multiply sequences of LikelihoodExpressions
///
#[cfg(feature = "python")]
#[pyfunction(name = "likelihood_product")]
pub fn py_likelihood_product(terms: Vec<Bound<'_, PyAny>>) -> PyResult<PyLikelihoodExpression> {
    if terms.is_empty() {
        return Ok(PyLikelihoodExpression(LikelihoodExpression::One));
    }
    if terms.len() == 1 {
        let term = &terms[0];
        if let Ok(expression) = term.extract::<PyLikelihoodExpression>() {
            return Ok(expression);
        }
        if let Ok(py_likelihood_id) = term.extract::<PyLikelihoodID>() {
            return Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
                py_likelihood_id.0,
            )));
        }
        return Err(PyTypeError::new_err(
            "Item is neither a PyLikelihoodExpression nor a PyLikelihoodID",
        ));
    }
    let mut iter = terms.iter();
    let Some(first_term) = iter.next() else {
        return Ok(PyLikelihoodExpression(LikelihoodExpression::One));
    };
    if let Ok(first_expression) = first_term.extract::<PyLikelihoodExpression>() {
        let mut product = first_expression.clone();
        for term in iter {
            product = product.__mul__(term)?;
        }
        return Ok(product);
    }
    if let Ok(first_likelihood_id) = first_term.extract::<PyLikelihoodID>() {
        let mut product = PyLikelihoodExpression(LikelihoodExpression::Term(first_likelihood_id.0));
        for term in iter {
            product = product.__mul__(term)?;
        }
        return Ok(product);
    }
    Err(PyTypeError::new_err(
        "Elements must be PyLikelihoodExpression or PyLikelihoodID",
    ))
}

/// A convenience function returning a zero-valued LikelihoodExpression
///
#[cfg(feature = "python")]
#[pyfunction(name = "LikelihoodZero")]
pub fn py_likelihood_zero() -> PyLikelihoodExpression {
    PyLikelihoodExpression(LikelihoodExpression::Zero)
}

/// A convenience function returning a unit-valued LikelihoodExpression
///
#[cfg(feature = "python")]
#[pyfunction(name = "LikelihoodOne")]
pub fn py_likelihood_one() -> PyLikelihoodExpression {
    PyLikelihoodExpression(LikelihoodExpression::One)
}

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodExpression {
    fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() + other_expr.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(self.0.clone()))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                other_expr.0.clone() + self.0.clone(),
            ))
        } else if let Ok(int) = other.extract::<usize>() {
            if int == 0 {
                Ok(PyLikelihoodExpression(self.0.clone()))
            } else {
                Err(PyTypeError::new_err(
                    "Addition with an integer for this type is only defined for 0",
                ))
            }
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for +"))
        }
    }
    fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() * other_expr.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
            Ok(PyLikelihoodExpression(
                self.0.clone() * other_expr.0.clone(),
            ))
        } else {
            Err(PyTypeError::new_err("Unsupported operand type for *"))
        }
    }
    fn __str__(&self) -> String {
        format!("{}", self.0)
    }
    fn __repr__(&self) -> String {
        format!("{:?}", self.0)
    }
}

/// A structure to evaluate and minimize combinations of [`LikelihoodTerm`]s.
pub struct LikelihoodEvaluator {
    likelihood_manager: LikelihoodManager,
    likelihood_expression: LikelihoodExpression,
}

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for LikelihoodEvaluator {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        thread_pool.install(|| self.evaluate(parameters))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        thread_pool.install(|| self.evaluate_gradient(parameters))
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for LikelihoodEvaluator {
    fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
        self.evaluate(parameters)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        _user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        self.evaluate_gradient(parameters)
    }
}

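/// A sign-flipped view of a [`LikelihoodEvaluator`]: MCMC samplers seek maxima rather than
/// minima, so sampling runs on the negated negative log-likelihood.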
pub(crate) struct NegativeLikelihoodEvaluator<'a>(&'a LikelihoodEvaluator);
#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for NegativeLikelihoodEvaluator<'_> {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for NegativeLikelihoodEvaluator<'_> {
    fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, user_data).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, user_data).map(|res| -res)
    }
}

impl LikelihoodEvaluator {
    /// The parameter names used in [`LikelihoodEvaluator::evaluate`]'s input in order.
    pub fn parameters(&self) -> Vec<String> {
        self.likelihood_manager.parameters()
    }
    /// A function that can be called to evaluate the sum/product of the [`LikelihoodTerm`]s
    /// contained by this [`LikelihoodEvaluator`].
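    ///
    /// The `parameters` slice must be ordered according to
    /// [`LikelihoodEvaluator::parameters`]; a sketch, assuming `evaluator` was produced by
    /// [`LikelihoodManager::load`]:
    ///
    /// ```ignore
    /// let p0 = vec![0.0; evaluator.parameters().len()];
    /// let value = evaluator.evaluate(&p0)?;
    /// ```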
    pub fn evaluate(&self, parameters: &[Float]) -> Result<Float, LadduError> {
        let mut param_buffers: Vec<Vec<Float>> = self
            .likelihood_manager
            .param_counts
            .iter()
            .map(|&count| vec![0.0; count])
            .collect();
        for (layout, buffer) in self
            .likelihood_manager
            .param_layouts
            .iter()
            .zip(param_buffers.iter_mut())
        {
            for (buffer_idx, &param_idx) in layout.iter().enumerate() {
                buffer[buffer_idx] = parameters[param_idx];
            }
        }
        let likelihood_values = LikelihoodValues(
            self.likelihood_manager
                .terms
                .iter()
                .zip(param_buffers.iter())
                .map(|(term, buffer)| term.evaluate(buffer))
                .collect(),
        );
        Ok(self.likelihood_expression.evaluate(&likelihood_values))
    }

    /// Evaluate the gradient of the stored [`LikelihoodExpression`] with the given values for
    /// the free parameters.
    pub fn evaluate_gradient(&self, parameters: &[Float]) -> Result<DVector<Float>, LadduError> {
        let mut param_buffers: Vec<Vec<Float>> = self
            .likelihood_manager
            .param_counts
            .iter()
            .map(|&count| vec![0.0; count])
            .collect();
        for (layout, buffer) in self
            .likelihood_manager
            .param_layouts
            .iter()
            .zip(param_buffers.iter_mut())
        {
            for (buffer_idx, &param_idx) in layout.iter().enumerate() {
                buffer[buffer_idx] = parameters[param_idx];
            }
        }
        let likelihood_values = LikelihoodValues(
            self.likelihood_manager
                .terms
                .iter()
                .zip(param_buffers.iter())
                .map(|(term, buffer)| term.evaluate(buffer))
                .collect(),
        );
        let mut gradient_buffers: Vec<DVector<Float>> = (0..self.likelihood_manager.terms.len())
            .map(|_| DVector::zeros(self.likelihood_manager.param_names.len()))
            .collect();
        for (((term, param_buffer), gradient_buffer), layout) in self
            .likelihood_manager
            .terms
            .iter()
            .zip(param_buffers.iter())
            .zip(gradient_buffers.iter_mut())
            .zip(self.likelihood_manager.param_layouts.iter())
        {
            let term_gradient = term.evaluate_gradient(param_buffer); // This has a local layout
            for (term_idx, &buffer_idx) in layout.iter().enumerate() {
                gradient_buffer[buffer_idx] = term_gradient[term_idx] // This has a global layout
            }
        }
        let likelihood_gradients = LikelihoodGradients(gradient_buffers);
        Ok(self
            .likelihood_expression
            .evaluate_gradient(&likelihood_values, &likelihood_gradients))
    }

    /// A function that can be called to minimize the sum/product of the [`LikelihoodTerm`]s
    /// contained by this [`LikelihoodEvaluator`].
    ///
    /// See [`NLL::minimize`] for more details.
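    ///
    /// A sketch of a bounded fit with default options (assuming `evaluator` and a starting
    /// point `p0` of the correct length; the bounds here are illustrative):
    ///
    /// ```ignore
    /// let bounds = vec![(-10.0, 10.0); evaluator.parameters().len()];
    /// let status = evaluator.minimize(&p0, Some(bounds), None)?;
    /// ```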
    pub fn minimize(
        &self,
        p0: &[Float],
        bounds: Option<Vec<(Float, Float)>>,
        options: Option<MinimizerOptions>,
    ) -> Result<Status, LadduError> {
        let options = options.unwrap_or_default();
        let mut m = Minimizer::new(options.algorithm, self.parameters().len())
            .with_max_steps(options.max_steps);
        if let Some(bounds) = bounds {
            m = m.with_bounds(bounds);
        }
        for observer in options.observers {
            m = m.with_observer(observer)
        }
        #[cfg(feature = "rayon")]
        {
            m.minimize(
                self,
                p0,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.minimize(
                self,
                p0,
                &mut (),
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        Ok(m.status)
    }

    /// A function that can be called to perform Markov Chain Monte Carlo sampling
    /// of the sum/product of the [`LikelihoodTerm`]s
    /// contained by this [`LikelihoodEvaluator`].
    ///
    /// See [`NLL::mcmc`] for more details.
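    ///
    /// A sketch with five identical walkers (in practice the starting positions should be
    /// dispersed, e.g. by adding small random offsets):
    ///
    /// ```ignore
    /// let n_free = evaluator.parameters().len();
    /// let walkers: Vec<DVector<Float>> = (0..5)
    ///     .map(|_| DVector::from_element(n_free, 0.1))
    ///     .collect();
    /// let ensemble = evaluator.mcmc(&walkers, 1000, None, Rng::new())?;
    /// ```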
    pub fn mcmc<T: AsRef<[DVector<Float>]>>(
        &self,
        p0: T,
        n_steps: usize,
        options: Option<MCMCOptions>,
        rng: Rng,
    ) -> Result<Ensemble, LadduError> {
        let options = options.unwrap_or(MCMCOptions::default_with_rng(rng));
        let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        let func = NegativeLikelihoodEvaluator(self);
        #[cfg(feature = "rayon")]
        {
            m.sample(
                &func,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                n_steps,
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.sample(
                &func,
                &mut (),
                n_steps,
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        Ok(m.ensemble)
    }
    /// A function that can be called to perform Particle Swarm Optimization
    /// of the sum/product of the [`LikelihoodTerm`]s
    /// contained by this [`LikelihoodEvaluator`].
    ///
    /// See [`NLL::swarm`] for more details.
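    ///
    /// A sketch, assuming `swarm` is an initialized [`Swarm`] and `evaluator` is a
    /// [`LikelihoodEvaluator`]:
    ///
    /// ```ignore
    /// let final_swarm = evaluator.swarm(swarm, None, Rng::new())?;
    /// ```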
    pub fn swarm(
        &self,
        swarm: Swarm,
        options: Option<SwarmOptions>,
        rng: Rng,
    ) -> Result<Swarm, LadduError> {
        let options = options.unwrap_or(SwarmOptions::default_with_rng(rng));
        let mut m = SwarmMinimizer::new(options.algorithm, swarm);
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        #[cfg(feature = "rayon")]
        {
            m.minimize(
                self,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.minimize(
                self,
                &mut (),
                ganesh::abort_signal::CtrlCAbortSignal::new().boxed(),
            )?;
        }
        Ok(m.swarm)
    }
}

/// A class which can be used to evaluate a collection of LikelihoodTerms managed by a
/// LikelihoodManager
///
#[cfg(feature = "python")]
#[pyclass(name = "LikelihoodEvaluator", module = "laddu")]
pub struct PyLikelihoodEvaluator(LikelihoodEvaluator);

#[cfg(feature = "python")]
#[pymethods]
impl PyLikelihoodEvaluator {
    /// A list of the names of the free parameters across all terms in all models
    ///
    /// Returns
    /// -------
    /// parameters : list of str
    ///
    #[getter]
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    /// Evaluate the sum of all terms in the evaluator
    ///
    /// Parameters
    /// ----------
    /// parameters : list of float
    ///     The values to use for the free parameters
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// result : float
    ///     The total negative log-likelihood summed over all terms
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool
    ///
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
        #[cfg(feature = "rayon")]
        {
            Ok(ThreadPoolBuilder::new()
                .num_threads(threads.unwrap_or_else(num_cpus::get))
                .build()
                .map_err(LadduError::from)?
                .install(|| self.0.evaluate(&parameters))?)
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(self.0.evaluate(&parameters)?)
        }
    }
    /// Evaluate the gradient of the sum of all terms in the evaluator
    ///
    /// Parameters
    /// ----------
    /// parameters : list of float
    ///     The values to use for the free parameters
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// result : array_like
    ///     A ``numpy`` array representing the gradient of the sum of all terms in the
    ///     evaluator
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool or problem creating the resulting
    ///     ``numpy`` array
    ///
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate_gradient<'py>(
        &self,
        py: Python<'py>,
        parameters: Vec<Float>,
        threads: Option<usize>,
    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
        #[cfg(feature = "rayon")]
        {
            Ok(PyArray1::from_slice(
                py,
                ThreadPoolBuilder::new()
                    .num_threads(threads.unwrap_or_else(num_cpus::get))
                    .build()
                    .map_err(LadduError::from)?
                    .install(|| self.0.evaluate_gradient(&parameters))?
                    .as_slice(),
            ))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(PyArray1::from_slice(
                py,
                self.0.evaluate_gradient(&parameters)?.as_slice(),
            ))
        }
    }

    /// Minimize all LikelihoodTerms with respect to the free parameters in the model
    ///
    /// This method "runs the fit". Given an initial position `p0` and optional `bounds`, this
    /// method performs a minimization over the total negative log-likelihood, optimizing the model
    /// over the stored signal data and Monte Carlo.
    ///
    /// Parameters
    /// ----------
    /// p0 : array_like
    ///     The initial parameters at the start of optimization
    /// bounds : list of tuple of float, optional
    ///     A list of lower and upper bound pairs for each parameter (use ``None`` for no bound)
    /// method : {LBFGSB, NelderMead}, optional
    ///     The minimization algorithm to use (defaults to LBFGSB)
    /// observers : Observer or list of Observers, optional
    ///     Callback functions which are applied after every algorithm step
    /// max_steps : int, default=4000
    ///     The maximum number of algorithm steps to perform
    /// debug : bool, default=False
    ///     Set to ``True`` to print out debugging information at each step
    /// verbose : bool, default=False
    ///     Set to ``True`` to print verbose information at each step
    /// show_step : bool, default=True
    ///     Include step number in verbose output
    /// show_x : bool, default=True
    ///     Include current best position in verbose output
    /// show_fx : bool, default=True
    ///     Include current best NLL in verbose output
2741    /// skip_hessian : bool, default = False
2742    ///     Skip calculation of the Hessian matrix (and parameter errors) at the termination of the
2743    ///     algorithm (use this when uncertainties are not needed/important)
2744    /// threads : int, optional
2745    ///     The number of threads to use (setting this to None will use all available CPUs)
2746    ///
2747    /// Returns
2748    /// -------
2749    /// Status
2750    ///     The status of the minimization algorithm at termination
2751    ///
2752    /// Raises
2753    /// ------
2754    /// Exception
2755    ///     If there was an error building the thread pool
2756    ///
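    /// Examples
    /// --------
    /// A minimal sketch; ``lh`` and the starting values are illustrative only:
    ///
    /// >>> status = lh.minimize(
    /// ...     [100.0, 0.5],
    /// ...     bounds=[(0.0, None), (None, None)],
    /// ...     max_steps=1000,
    /// ... )  # doctest: +SKIP
    /// >>> print(status)  # doctest: +SKIP
    ///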
    #[pyo3(signature = (p0, *, bounds=None, method=None, observers=None, max_steps=4000, debug=false, verbose=false, show_step=true, show_x=true, show_fx=true, skip_hessian=false, threads=None))]
    #[allow(clippy::too_many_arguments)]
    fn minimize(
        &self,
        p0: Vec<Float>,
        bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
        method: Option<Bound<'_, PyAny>>,
        observers: Option<Bound<'_, PyAny>>,
        max_steps: usize,
        debug: bool,
        verbose: bool,
        show_step: bool,
        show_x: bool,
        show_fx: bool,
        skip_hessian: bool,
        threads: Option<usize>,
    ) -> PyResult<PyStatus> {
        // Replace unbounded sides (Python `None`) with infinities before handing the
        // bounds to the minimizer
        let bounds = bounds.map(|bounds_vec| {
            bounds_vec
                .iter()
                .map(|(opt_lb, opt_ub)| {
                    (
                        opt_lb.unwrap_or(Float::NEG_INFINITY),
                        opt_ub.unwrap_or(Float::INFINITY),
                    )
                })
                .collect()
        });
        let options = py_parse_minimizer_options(
            method,
            observers,
            max_steps,
            debug,
            verbose,
            show_step,
            show_x,
            show_fx,
            skip_hessian,
            threads,
        )?;
        let status = self.0.minimize(&p0, bounds, Some(options))?;
        Ok(PyStatus(status))
    }

    /// Run an MCMC algorithm on the free parameters of the LikelihoodTerm's model
    ///
    /// This method can be used to sample the underlying log-likelihood given an initial
    /// position for each walker `p0`.
    ///
    /// Parameters
    /// ----------
    /// p0 : array_like
    ///     The initial position of each walker at the start of sampling
    /// n_steps : int
    ///     The number of MCMC steps each walker should take
    /// method : {ESS, AIES}, optional
    ///     The MCMC algorithm to use (defaults to ESS)
    /// observers : MCMCObserver or list of MCMCObservers, optional
    ///     Callback functions which are applied after every sampling step
    /// debug : bool, default=False
    ///     Set to ``True`` to print out debugging information at each step
    /// verbose : bool, default=False
    ///     Set to ``True`` to print verbose information at each step
    /// seed : int
    ///     The seed for the random number generator
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// Ensemble
    ///     The resulting ensemble of walkers
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool
    ///
    /// Notes
    /// -----
    /// The default algorithm is the ESS algorithm with a moveset of 90% differential moves to
    /// 10% Gaussian moves.
    ///
    /// Since MCMC methods are inclined to sample maxima rather than minima, the underlying
    /// function sign is automatically flipped when calling this method.
    ///
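    /// Examples
    /// --------
    /// A minimal sketch; ``lh`` is a likelihood evaluator built elsewhere, and the walker
    /// positions are illustrative:
    ///
    /// >>> p0 = [[1.0, 0.5], [1.1, 0.4], [0.9, 0.6]]  # one position per walker
    /// >>> ensemble = lh.mcmc(p0, 100, seed=42)  # doctest: +SKIP
    ///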
    #[pyo3(signature = (p0, n_steps, *, method=None, observers=None, debug=false, verbose=false, seed=0, threads=None))]
    #[allow(clippy::too_many_arguments)]
    fn mcmc(
        &self,
        p0: Vec<Vec<Float>>,
        n_steps: usize,
        method: Option<Bound<'_, PyAny>>,
        observers: Option<Bound<'_, PyAny>>,
        debug: bool,
        verbose: bool,
        seed: u64,
        threads: Option<usize>,
    ) -> PyResult<PyEnsemble> {
        let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
        let mut rng = Rng::new();
        rng.seed(seed);
        let options =
            py_parse_mcmc_options(method, observers, debug, verbose, threads, rng.clone())?;
        let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
        Ok(PyEnsemble(ensemble))
    }

    /// Run a Particle Swarm Optimization algorithm on the free parameters of the LikelihoodTerm's model
    ///
    /// This method can be used to minimize the underlying negative log-likelihood given
    /// an initial swarm position.
    ///
    /// Parameters
    /// ----------
    /// swarm : Swarm
    ///     The initial position of the swarm
    /// method : {PSO}, optional
    ///     The swarm algorithm to use (defaults to PSO)
    /// observers : SwarmObserver or list of SwarmObservers, optional
    ///     Callback functions which are applied after every swarm step
    /// debug : bool, default=False
    ///     Set to ``True`` to print out debugging information at each step
    /// verbose : bool, default=False
    ///     Set to ``True`` to print verbose information at each step
    /// seed : int
    ///     The seed for the random number generator
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// Swarm
    ///     The swarm at its final position
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool
    ///
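    /// Examples
    /// --------
    /// A minimal sketch; ``lh`` is a likelihood evaluator and ``swarm0`` a ``Swarm``
    /// constructed elsewhere:
    ///
    /// >>> result = lh.swarm(swarm0, seed=0)  # doctest: +SKIP
    /// >>> print(result)  # the swarm at its final position  # doctest: +SKIP
    ///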
    #[pyo3(signature = (swarm, *, method=None, observers=None, debug=false, verbose=false, seed=0, threads=None))]
    #[allow(clippy::too_many_arguments)]
    fn swarm(
        &self,
        swarm: PySwarm,
        method: Option<Bound<'_, PyAny>>,
        observers: Option<Bound<'_, PyAny>>,
        debug: bool,
        verbose: bool,
        seed: u64,
        threads: Option<usize>,
    ) -> PyResult<PySwarm> {
        let mut rng = Rng::new();
        rng.seed(seed);
        let options =
            py_parse_swarm_options(method, observers, debug, verbose, threads, rng.clone())?;
        let swarm = self.0.swarm(swarm.0, Some(options), rng)?;
        Ok(PySwarm(swarm))
    }
}

/// A [`LikelihoodTerm`] which represents a single scaling parameter.
#[derive(Clone)]
pub struct LikelihoodScalar(String);

impl LikelihoodScalar {
    /// Create a new [`LikelihoodScalar`] whose single parameter has the given name.
    pub fn new<T: AsRef<str>>(name: T) -> Box<Self> {
        Self(name.as_ref().into()).into()
    }
}

impl LikelihoodTerm for LikelihoodScalar {
    fn evaluate(&self, parameters: &[Float]) -> Float {
        parameters[0]
    }

    fn evaluate_gradient(&self, _parameters: &[Float]) -> DVector<Float> {
        DVector::from_vec(vec![1.0])
    }

    fn parameters(&self) -> Vec<String> {
        vec![self.0.clone()]
    }
}

/// A parameterized scalar term which can be added to a LikelihoodManager
///
/// Parameters
/// ----------
/// name : str
///     The name of the new scalar parameter
///
/// Returns
/// -------
/// LikelihoodTerm
///
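/// Examples
/// --------
/// A minimal sketch; ``manager`` stands in for a ``LikelihoodManager`` created elsewhere,
/// and the parameter name is illustrative:
///
/// >>> scale = LikelihoodScalar("my_scale")  # doctest: +SKIP
/// >>> scale_id = manager.register(scale)  # doctest: +SKIP
///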
#[cfg(feature = "python")]
#[pyfunction(name = "LikelihoodScalar")]
pub fn py_likelihood_scalar(name: String) -> PyLikelihoodTerm {
    PyLikelihoodTerm(LikelihoodScalar::new(name))
}