laddu_extensions/likelihoods.rs

use std::{
    collections::HashMap,
    fmt::{Debug, Display},
    sync::Arc,
};

use accurate::{sum::Klein, traits::*};
use auto_ops::*;
use dyn_clone::DynClone;
use fastrand::Rng;
use ganesh::{
    mcmc::{ESSMove, Ensemble},
    Function, Minimizer, Sampler, Status,
};
use laddu_core::{
    amplitudes::{central_difference, AmplitudeValues, Evaluator, GradientValues, Model},
    data::Dataset,
    resources::Parameters,
    Complex, DVector, Float, LadduError,
};

#[cfg(feature = "mpi")]
use laddu_core::mpi::LadduMPI;

#[cfg(feature = "mpi")]
use mpi::{datatype::PartitionMut, topology::SimpleCommunicator, traits::*};

#[cfg(feature = "python")]
use crate::ganesh_ext::py_ganesh::{
    py_parse_mcmc_options, py_parse_minimizer_options, PyEnsemble, PyStatus,
};
#[cfg(feature = "python")]
use laddu_python::{
    amplitudes::{PyEvaluator, PyModel},
    data::PyDataset,
};
#[cfg(feature = "python")]
use numpy::PyArray1;
#[cfg(feature = "python")]
use pyo3::{
    exceptions::PyTypeError,
    prelude::*,
    types::{PyDict, PyList},
};
#[cfg(feature = "rayon")]
use rayon::{prelude::*, ThreadPool, ThreadPoolBuilder};

use crate::ganesh_ext::{MCMCOptions, MinimizerOptions};

/// A trait which describes a term that can be used like a likelihood (more correctly, a negative
/// log-likelihood) in a minimization.
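///
/// # Example
///
/// A minimal sketch of a custom term: a Gaussian penalty on a single named parameter.
/// The struct, its fields, and the parameter name here are illustrative, not part of
/// this crate's API:
///
/// ```ignore
/// #[derive(Clone)]
/// struct GaussianPrior {
///     name: String,
///     mu: Float,
///     sigma: Float,
/// }
///
/// impl LikelihoodTerm for GaussianPrior {
///     fn evaluate(&self, parameters: &[Float]) -> Float {
///         // Negative log of a Gaussian density (up to an additive constant).
///         ((parameters[0] - self.mu) / self.sigma).powi(2)
///     }
///     // `evaluate_gradient` falls back to the default central-difference implementation.
///     fn parameters(&self) -> Vec<String> {
///         vec![self.name.clone()]
///     }
/// }
/// ```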
pub trait LikelihoodTerm: DynClone + Send + Sync {
    /// Evaluate the term given some input parameters.
    fn evaluate(&self, parameters: &[Float]) -> Float;
    /// Evaluate the gradient of the term given some input parameters.
    fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
        central_difference(parameters, |parameters: &[Float]| self.evaluate(parameters))
    }
    /// The list of names of the input parameters for [`LikelihoodTerm::evaluate`].
    fn parameters(&self) -> Vec<String>;
}

dyn_clone::clone_trait_object!(LikelihoodTerm);
64
65/// A term in an expression with multiple likelihood components
66///
67/// See Also
68/// --------
69/// NLL.as_term
70///
71#[cfg(feature = "python")]
72#[pyclass(name = "LikelihoodTerm", module = "laddu")]
73#[derive(Clone)]
74pub struct PyLikelihoodTerm(pub Box<dyn LikelihoodTerm>);
75
76/// An extended, unbinned negative log-likelihood evaluator.
77#[derive(Clone)]
78pub struct NLL {
79    /// The internal [`Evaluator`] for data
80    pub data_evaluator: Evaluator,
81    /// The interanl [`Evaluator`] for accepted Monte Carlo
82    pub accmc_evaluator: Evaluator,
83}

impl NLL {
    /// Construct an [`NLL`] from a [`Model`] and two [`Dataset`]s (data and Monte Carlo). This is the equivalent of the [`Model::load`] method,
    /// but for two [`Dataset`]s and a different method of evaluation.
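    ///
    /// # Example
    ///
    /// A sketch, assuming a `Model` and two `Arc<Dataset>`s (data and accepted
    /// Monte Carlo) are already in scope; the variable names are illustrative:
    ///
    /// ```ignore
    /// let nll = NLL::new(&model, &ds_data, &ds_accmc);
    /// let free_parameters = nll.parameters();
    /// ```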
    pub fn new(model: &Model, ds_data: &Arc<Dataset>, ds_accmc: &Arc<Dataset>) -> Box<Self> {
        Self {
            data_evaluator: model.load(ds_data),
            accmc_evaluator: model.load(ds_accmc),
        }
        .into()
    }
    /// Activate an [`Amplitude`](`laddu_core::amplitudes::Amplitude`) by name.
    pub fn activate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.activate(&name)?;
        self.accmc_evaluator.activate(&name)
    }
    /// Activate several [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s by name.
    pub fn activate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.activate_many(names)?;
        self.accmc_evaluator.activate_many(names)
    }
    /// Activate all registered [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s.
    pub fn activate_all(&self) {
        self.data_evaluator.activate_all();
        self.accmc_evaluator.activate_all();
    }
    /// Deactivate an [`Amplitude`](`laddu_core::amplitudes::Amplitude`) by name.
    pub fn deactivate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.deactivate(&name)?;
        self.accmc_evaluator.deactivate(&name)
    }
    /// Deactivate several [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s by name.
    pub fn deactivate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.deactivate_many(names)?;
        self.accmc_evaluator.deactivate_many(names)
    }
    /// Deactivate all registered [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s.
    pub fn deactivate_all(&self) {
        self.data_evaluator.deactivate_all();
        self.accmc_evaluator.deactivate_all();
    }
    /// Isolate an [`Amplitude`](`laddu_core::amplitudes::Amplitude`) by name (deactivate the rest).
    pub fn isolate<T: AsRef<str>>(&self, name: T) -> Result<(), LadduError> {
        self.data_evaluator.isolate(&name)?;
        self.accmc_evaluator.isolate(&name)
    }
    /// Isolate several [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s by name (deactivate the rest).
    pub fn isolate_many<T: AsRef<str>>(&self, names: &[T]) -> Result<(), LadduError> {
        self.data_evaluator.isolate_many(names)?;
        self.accmc_evaluator.isolate_many(names)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each
    /// Monte-Carlo event (non-MPI version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project`] instead.
    pub fn project_local(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> Vec<Float> {
        let (mc_dataset, result) = if let Some(mc_evaluator) = mc_evaluator {
            (
                mc_evaluator.dataset.clone(),
                mc_evaluator.evaluate_local(parameters),
            )
        } else {
            (
                self.accmc_evaluator.dataset.clone(),
                self.accmc_evaluator.evaluate_local(parameters),
            )
        };
        let n_mc = self.accmc_evaluator.dataset.n_events();
        #[cfg(feature = "rayon")]
        let output: Vec<Float> = result
            .par_iter()
            .zip(mc_dataset.events.par_iter())
            .map(|(l, e)| e.weight * l.re / n_mc as Float)
            .collect();

        #[cfg(not(feature = "rayon"))]
        let output: Vec<Float> = result
            .iter()
            .zip(mc_dataset.events.iter())
            .map(|(l, e)| e.weight * l.re / n_mc as Float)
            .collect();
        output
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each
    /// Monte-Carlo event (MPI-compatible version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project`] instead.
    #[cfg(feature = "mpi")]
    pub fn project_mpi(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Vec<Float> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let local_projection = self.project_local(parameters, mc_evaluator);
        let mut buffer: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }
        buffer
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each
    /// Monte-Carlo event. This method takes the real part of the given expression (discarding
    /// the imaginary part entirely, which does not matter if expressions are coherent sums
    /// wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    /// Event weights are determined by the following formula:
    ///
    /// ```math
    /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
    /// ```
    ///
    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
    /// regardless of the `mc_evaluator`.
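    ///
    /// # Example
    ///
    /// A sketch, assuming `nll` and a fitted parameter vector are in scope (the
    /// variable names are illustrative):
    ///
    /// ```ignore
    /// // One weight per accepted Monte Carlo event under the fitted model:
    /// let weights = nll.project(&best_fit_parameters, None);
    /// ```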
    pub fn project(&self, parameters: &[Float], mc_evaluator: Option<Evaluator>) -> Vec<Float> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_mpi(parameters, mc_evaluator, &world);
            }
        }
        self.project_local(parameters, mc_evaluator)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event (non-MPI version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_gradient`] instead.
    pub fn project_gradient_local(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        let (mc_dataset, result, result_gradient) = if let Some(mc_evaluator) = mc_evaluator {
            (
                mc_evaluator.dataset.clone(),
                mc_evaluator.evaluate_local(parameters),
                mc_evaluator.evaluate_gradient_local(parameters),
            )
        } else {
            (
                self.accmc_evaluator.dataset.clone(),
                self.accmc_evaluator.evaluate_local(parameters),
                self.accmc_evaluator.evaluate_gradient_local(parameters),
            )
        };
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        {
            (
                result
                    .par_iter()
                    .zip(mc_dataset.events.par_iter())
                    .map(|(l, e)| e.weight * l.re / n_mc)
                    .collect(),
                result_gradient
                    .par_iter()
                    .zip(mc_dataset.events.par_iter())
                    .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                    .collect(),
            )
        }
        #[cfg(not(feature = "rayon"))]
        {
            (
                result
                    .iter()
                    .zip(mc_dataset.events.iter())
                    .map(|(l, e)| e.weight * l.re / n_mc)
                    .collect(),
                result_gradient
                    .iter()
                    .zip(mc_dataset.events.iter())
                    .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                    .collect(),
            )
        }
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event (MPI-compatible version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_gradient`] instead.
    #[cfg(feature = "mpi")]
    pub fn project_gradient_mpi(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let (local_projection, local_gradient_projection) =
            self.project_gradient_local(parameters, mc_evaluator);
        let mut projection_result: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }

        let flattened_local_gradient_projection = local_gradient_projection
            .iter()
            .flat_map(|g| g.data.as_vec().to_vec())
            .collect::<Vec<Float>>();
        let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
        let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
        let mut partitioned_flattened_result_buffer =
            PartitionMut::new(&mut flattened_result_buffer, counts, displs);
        world.all_gather_varcount_into(
            &flattened_local_gradient_projection,
            &mut partitioned_flattened_result_buffer,
        );
        let gradient_projection_result = flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .collect();
        (projection_result, gradient_projection_result)
    }
    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event. This method takes the real part of the given
    /// expression (discarding the imaginary part entirely, which does not matter if expressions
    /// are coherent sums wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    /// Event weights are determined by the following formula:
    ///
    /// ```math
    /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
    /// ```
    ///
    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
    /// regardless of the `mc_evaluator`.
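    ///
    /// # Example
    ///
    /// A sketch, assuming `nll` and a fitted parameter vector are in scope; each
    /// returned gradient vector has one entry per free parameter:
    ///
    /// ```ignore
    /// let (weights, weight_gradients) = nll.project_gradient(&best_fit_parameters, None);
    /// ```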
    pub fn project_gradient(
        &self,
        parameters: &[Float],
        mc_evaluator: Option<Evaluator>,
    ) -> (Vec<Float>, Vec<DVector<Float>>) {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_gradient_mpi(parameters, mc_evaluator, &world);
            }
        }
        self.project_gradient_local(parameters, mc_evaluator)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation (non-MPI version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_with`] instead.
    pub fn project_with_local<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<Vec<Float>, LadduError> {
        if let Some(mc_evaluator) = &mc_evaluator {
            let current_active_mc = mc_evaluator.resources.read().active.clone();
            mc_evaluator.isolate_many(names)?;
            let mc_dataset = mc_evaluator.dataset.clone();
            let result = mc_evaluator.evaluate_local(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events();
            #[cfg(feature = "rayon")]
            let output: Vec<Float> = result
                .par_iter()
                .zip(mc_dataset.events.par_iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            #[cfg(not(feature = "rayon"))]
            let output: Vec<Float> = result
                .iter()
                .zip(mc_dataset.events.iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            mc_evaluator.resources.write().active = current_active_mc;
            Ok(output)
        } else {
            let current_active_data = self.data_evaluator.resources.read().active.clone();
            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
            self.isolate_many(names)?;
            let mc_dataset = &self.accmc_evaluator.dataset;
            let result = self.accmc_evaluator.evaluate_local(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events();
            #[cfg(feature = "rayon")]
            let output: Vec<Float> = result
                .par_iter()
                .zip(mc_dataset.events.par_iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            #[cfg(not(feature = "rayon"))]
            let output: Vec<Float> = result
                .iter()
                .zip(mc_dataset.events.iter())
                .map(|(l, e)| e.weight * l.re / n_mc as Float)
                .collect();
            self.data_evaluator.resources.write().active = current_active_data;
            self.accmc_evaluator.resources.write().active = current_active_accmc;
            Ok(output)
        }
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation (MPI-compatible version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_with`] instead.
    #[cfg(feature = "mpi")]
    pub fn project_with_mpi<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Result<Vec<Float>, LadduError> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let local_projection = self.project_with_local(parameters, names, mc_evaluator)?;
        let mut buffer: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut buffer, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }
        Ok(buffer)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation.
    ///
    /// This method takes the real part of the given expression (discarding
    /// the imaginary part entirely, which does not matter if expressions are coherent sums
    /// wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    /// Event weights are determined by the following formula:
    ///
    /// ```math
    /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
    /// ```
    ///
    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
    /// regardless of the `mc_evaluator`.
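    ///
    /// # Example
    ///
    /// A sketch that extracts the contribution of a single amplitude; the amplitude
    /// name and parameter vector are illustrative:
    ///
    /// ```ignore
    /// let s_wave_weights = nll.project_with(&best_fit_parameters, &["S-wave"], None)?;
    /// ```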
    pub fn project_with<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<Vec<Float>, LadduError> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_with_mpi(parameters, names, mc_evaluator, &world);
            }
        }
        self.project_with_local(parameters, names, mc_evaluator)
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project_gradient`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation (non-MPI version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_gradient_with`] instead.
    pub fn project_gradient_with_local<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        if let Some(mc_evaluator) = &mc_evaluator {
            let current_active_mc = mc_evaluator.resources.read().active.clone();
            mc_evaluator.isolate_many(names)?;
            let mc_dataset = mc_evaluator.dataset.clone();
            let result = mc_evaluator.evaluate_local(parameters);
            let result_gradient = mc_evaluator.evaluate_gradient(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
            #[cfg(feature = "rayon")]
            let (res, res_gradient) = {
                (
                    result
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            #[cfg(not(feature = "rayon"))]
            let (res, res_gradient) = {
                (
                    result
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            mc_evaluator.resources.write().active = current_active_mc;
            Ok((res, res_gradient))
        } else {
            let current_active_data = self.data_evaluator.resources.read().active.clone();
            let current_active_accmc = self.accmc_evaluator.resources.read().active.clone();
            self.isolate_many(names)?;
            let mc_dataset = &self.accmc_evaluator.dataset;
            let result = self.accmc_evaluator.evaluate_local(parameters);
            let result_gradient = self.accmc_evaluator.evaluate_gradient(parameters);
            let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
            #[cfg(feature = "rayon")]
            let (res, res_gradient) = {
                (
                    result
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .par_iter()
                        .zip(mc_dataset.events.par_iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            #[cfg(not(feature = "rayon"))]
            let (res, res_gradient) = {
                (
                    result
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(l, e)| e.weight * l.re / n_mc)
                        .collect(),
                    result_gradient
                        .iter()
                        .zip(mc_dataset.events.iter())
                        .map(|(grad_l, e)| grad_l.map(|g| g.re).scale(e.weight / n_mc))
                        .collect(),
                )
            };
            self.data_evaluator.resources.write().active = current_active_data;
            self.accmc_evaluator.resources.write().active = current_active_accmc;
            Ok((res, res_gradient))
        }
    }

    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each Monte-Carlo event. This method differs from the standard
    /// [`NLL::project_gradient`] in that it first isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s
    /// by name, but returns the [`NLL`] to its prior state after calculation (MPI-compatible version).
    ///
    /// # Notes
    ///
    /// This method is not intended to be called in analyses but rather in writing methods
    /// that have `mpi`-feature-gated versions. Most users will want to call [`NLL::project_gradient_with`] instead.
    #[cfg(feature = "mpi")]
    pub fn project_gradient_with_mpi<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
        world: &SimpleCommunicator,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        let n_events = mc_evaluator
            .as_ref()
            .unwrap_or(&self.accmc_evaluator)
            .dataset
            .n_events();
        let (local_projection, local_gradient_projection) =
            self.project_gradient_with_local(parameters, names, mc_evaluator)?;
        let mut projection_result: Vec<Float> = vec![0.0; n_events];
        let (counts, displs) = world.get_counts_displs(n_events);
        {
            let mut partitioned_buffer = PartitionMut::new(&mut projection_result, counts, displs);
            world.all_gather_varcount_into(&local_projection, &mut partitioned_buffer);
        }

        let flattened_local_gradient_projection = local_gradient_projection
            .iter()
            .flat_map(|g| g.data.as_vec().to_vec())
            .collect::<Vec<Float>>();
        let (counts, displs) = world.get_flattened_counts_displs(n_events, parameters.len());
        let mut flattened_result_buffer = vec![0.0; n_events * parameters.len()];
        let mut partitioned_flattened_result_buffer =
            PartitionMut::new(&mut flattened_result_buffer, counts, displs);
        world.all_gather_varcount_into(
            &flattened_local_gradient_projection,
            &mut partitioned_flattened_result_buffer,
        );
        let gradient_projection_result = flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .collect();
        Ok((projection_result, gradient_projection_result))
    }
    /// Project the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters to obtain weights and gradients of
    /// those weights for each
    /// Monte-Carlo event. This method differs from the standard [`NLL::project_gradient`] in that it first
    /// isolates the selected [`Amplitude`](`laddu_core::amplitudes::Amplitude`)s by name, but returns
    /// the [`NLL`] to its prior state after calculation.
    ///
    /// This method takes the real part of the given expression (discarding
    /// the imaginary part entirely, which does not matter if expressions are coherent sums
    /// wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    /// Event weights are determined by the following formula:
    ///
    /// ```math
    /// \text{weight}(\vec{p}; e) = \text{weight}(e) \mathcal{L}(e) / N_{\text{MC}}
    /// ```
    ///
    /// Note that $`N_{\text{MC}}`$ will always be the number of accepted Monte Carlo events,
    /// regardless of the `mc_evaluator`.
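    ///
    /// # Example
    ///
    /// A sketch combining isolation and gradients; the amplitude name and parameter
    /// vector are illustrative:
    ///
    /// ```ignore
    /// let (weights, weight_gradients) =
    ///     nll.project_gradient_with(&best_fit_parameters, &["S-wave"], None)?;
    /// ```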
    pub fn project_gradient_with<T: AsRef<str>>(
        &self,
        parameters: &[Float],
        names: &[T],
        mc_evaluator: Option<Evaluator>,
    ) -> Result<(Vec<Float>, Vec<DVector<Float>>), LadduError> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.project_gradient_with_mpi(parameters, names, mc_evaluator, &world);
            }
        }
        self.project_gradient_with_local(parameters, names, mc_evaluator)
    }

    fn evaluate_local(&self, parameters: &[Float]) -> Float {
        let data_result = self.data_evaluator.evaluate_local(parameters);
        let mc_result = self.accmc_evaluator.evaluate_local(parameters);
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        let data_term: Float = data_result
            .par_iter()
            .zip(self.data_evaluator.dataset.events.par_iter())
            .map(|(l, e)| e.weight * Float::ln(l.re))
            .parallel_sum_with_accumulator::<Klein<Float>>();
        #[cfg(feature = "rayon")]
        let mc_term: Float = mc_result
            .par_iter()
            .zip(self.accmc_evaluator.dataset.events.par_iter())
            .map(|(l, e)| e.weight * l.re)
            .parallel_sum_with_accumulator::<Klein<Float>>();
        #[cfg(not(feature = "rayon"))]
        let data_term: Float = data_result
            .iter()
            .zip(self.data_evaluator.dataset.events.iter())
            .map(|(l, e)| e.weight * Float::ln(l.re))
            .sum_with_accumulator::<Klein<Float>>();
        #[cfg(not(feature = "rayon"))]
        let mc_term: Float = mc_result
            .iter()
            .zip(self.accmc_evaluator.dataset.events.iter())
            .map(|(l, e)| e.weight * l.re)
            .sum_with_accumulator::<Klein<Float>>();
        -2.0 * (data_term - mc_term / n_mc)
    }

    #[cfg(feature = "mpi")]
    fn evaluate_mpi(&self, parameters: &[Float], world: &SimpleCommunicator) -> Float {
        let local_evaluation = self.evaluate_local(parameters);
        let mut buffer: Vec<Float> = vec![0.0; world.size() as usize];
        world.all_gather_into(&local_evaluation, &mut buffer);
        buffer.iter().sum()
    }

    fn evaluate_gradient_local(&self, parameters: &[Float]) -> DVector<Float> {
        let data_resources = self.data_evaluator.resources.read();
        let data_parameters = Parameters::new(parameters, &data_resources.constants);
        let mc_resources = self.accmc_evaluator.resources.read();
        let mc_parameters = Parameters::new(parameters, &mc_resources.constants);
        let n_mc = self.accmc_evaluator.dataset.n_events() as Float;
        #[cfg(feature = "rayon")]
        let data_term: DVector<Float> = self
            .data_evaluator
            .dataset
            .events
            .par_iter()
            .zip(data_resources.caches.par_iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
                self.data_evaluator
                    .amplitudes
                    .iter()
                    .zip(data_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&data_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.data_evaluator
                            .amplitudes
                            .iter()
                            .zip(data_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&data_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.data_evaluator.expression.evaluate(&amp_vals),
                    self.data_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
            .collect::<Vec<DVector<Float>>>()
            .iter()
            .sum(); // TODO: replace with custom implementation of accurate crate's trait
        #[cfg(feature = "rayon")]
        let mc_term: DVector<Float> = self
            .accmc_evaluator
            .dataset
            .events
            .par_iter()
            .zip(mc_resources.caches.par_iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
                self.accmc_evaluator
                    .amplitudes
                    .iter()
                    .zip(mc_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&mc_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.accmc_evaluator
                            .amplitudes
                            .iter()
                            .zip(mc_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&mc_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.accmc_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, g)| w * g.map(|gi| gi.re))
            .collect::<Vec<DVector<Float>>>()
            .iter()
            .sum();
        #[cfg(not(feature = "rayon"))]
        let data_term: DVector<Float> = self
            .data_evaluator
            .dataset
            .events
            .iter()
            .zip(data_resources.caches.iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.data_evaluator.amplitudes.len()];
                self.data_evaluator
                    .amplitudes
                    .iter()
                    .zip(data_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&data_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.data_evaluator
                            .amplitudes
                            .iter()
                            .zip(data_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&data_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.data_evaluator.expression.evaluate(&amp_vals),
                    self.data_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, l, g)| g.map(|gi| gi.re * w / l.re))
            .sum();
        #[cfg(not(feature = "rayon"))]
        let mc_term: DVector<Float> = self
            .accmc_evaluator
            .dataset
            .events
            .iter()
            .zip(mc_resources.caches.iter())
            .map(|(event, cache)| {
                let mut gradient_values =
                    vec![DVector::zeros(parameters.len()); self.accmc_evaluator.amplitudes.len()];
                self.accmc_evaluator
                    .amplitudes
                    .iter()
                    .zip(mc_resources.active.iter())
                    .zip(gradient_values.iter_mut())
                    .for_each(|((amp, active), grad)| {
                        if *active {
                            amp.compute_gradient(&mc_parameters, event, cache, grad)
                        }
                    });
                (
                    event.weight,
                    AmplitudeValues(
                        self.accmc_evaluator
                            .amplitudes
                            .iter()
                            .zip(mc_resources.active.iter())
                            .map(|(amp, active)| {
                                if *active {
                                    amp.compute(&mc_parameters, event, cache)
                                } else {
                                    Complex::ZERO
                                }
                            })
                            .collect(),
                    ),
                    GradientValues(gradient_values),
                )
            })
            .map(|(weight, amp_vals, grad_vals)| {
                (
                    weight,
                    self.accmc_evaluator
                        .expression
                        .evaluate_gradient(&amp_vals, &grad_vals),
                )
            })
            .map(|(w, g)| w * g.map(|gi| gi.re))
            .sum();
        -2.0 * (data_term - mc_term / n_mc)
    }

    #[cfg(feature = "mpi")]
    fn evaluate_gradient_mpi(
        &self,
        parameters: &[Float],
        world: &SimpleCommunicator,
    ) -> DVector<Float> {
        let local_evaluation_vec = self
            .evaluate_gradient_local(parameters)
            .data
            .as_vec()
            .to_vec();
        let mut flattened_result_buffer = vec![0.0; world.size() as usize * parameters.len()];
        world.all_gather_into(&local_evaluation_vec, &mut flattened_result_buffer);
        flattened_result_buffer
            .chunks(parameters.len())
            .map(DVector::from_row_slice)
            .sum::<DVector<Float>>()
    }
}

impl LikelihoodTerm for NLL {
    /// Get the list of parameter names in the order they appear in the [`NLL::evaluate`]
    /// method.
    fn parameters(&self) -> Vec<String> {
        self.data_evaluator
            .resources
            .read()
            .parameters
            .iter()
            .cloned()
            .collect()
    }

    /// Evaluate the stored [`Model`] over the events in the [`Dataset`] stored by the
    /// [`Evaluator`] with the given values for free parameters. This method takes the
    /// real part of the given expression (discarding the imaginary part entirely, which
    /// does not matter if expressions are coherent sums wrapped in [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)). The
    /// result is given by the following formula:
    ///
    /// ```math
    /// NLL(\vec{p}) = -2 \left(\sum_{e \in \text{Data}} \text{weight}(e) \ln(\mathcal{L}(e)) - \frac{1}{N_{\text{MC}_A}} \sum_{e \in \text{MC}_A} \text{weight}(e) \mathcal{L}(e) \right)
    /// ```
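    ///
    /// # Example
    ///
    /// A sketch, assuming `nll` is in scope; the parameter values are illustrative
    /// and must match the length of `nll.parameters()`:
    ///
    /// ```ignore
    /// let nll_value = LikelihoodTerm::evaluate(nll.as_ref(), &[1.0, 0.0]);
    /// ```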
    fn evaluate(&self, parameters: &[Float]) -> Float {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.evaluate_mpi(parameters, &world);
            }
        }
        self.evaluate_local(parameters)
    }

    /// Evaluate the gradient of the stored [`Model`] over the events in the [`Dataset`]
    /// stored by the [`Evaluator`] with the given values for free parameters. This method takes the
    /// real part of the given expression (discarding the imaginary part entirely, which
    /// does not matter if expressions are coherent sums wrapped in
    /// [`Expression::norm_sqr`](`laddu_core::Expression::norm_sqr`)).
    fn evaluate_gradient(&self, parameters: &[Float]) -> DVector<Float> {
        #[cfg(feature = "mpi")]
        {
            if let Some(world) = laddu_core::mpi::get_world() {
                return self.evaluate_gradient_mpi(parameters, &world);
            }
        }
        self.evaluate_gradient_local(parameters)
    }
}

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for NLL {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Ok(thread_pool.install(|| LikelihoodTerm::evaluate(self, parameters)))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Ok(thread_pool.install(|| LikelihoodTerm::evaluate_gradient(self, parameters)))
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for NLL {
    fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
        Ok(LikelihoodTerm::evaluate(self, parameters))
    }
    fn gradient(
        &self,
        parameters: &[Float],
        _user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Ok(LikelihoodTerm::evaluate_gradient(self, parameters))
    }
}

/// A thin wrapper which negates the wrapped [`NLL`], turning the negative log-likelihood
/// into a log-likelihood for samplers that maximize rather than minimize.
pub(crate) struct LogLikelihood<'a>(&'a NLL);

#[cfg(feature = "rayon")]
impl Function<ThreadPool, LadduError> for LogLikelihood<'_> {
    fn evaluate(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        thread_pool: &mut ThreadPool,
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
    }
}

#[cfg(not(feature = "rayon"))]
impl Function<(), LadduError> for LogLikelihood<'_> {
    fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
        Function::evaluate(self.0, parameters, user_data).map(|res| -res)
    }
    fn gradient(
        &self,
        parameters: &[Float],
        user_data: &mut (),
    ) -> Result<DVector<Float>, LadduError> {
        Function::gradient(self.0, parameters, user_data).map(|res| -res)
    }
}

impl NLL {
    /// Minimizes the negative log-likelihood using the L-BFGS-B algorithm (by default), a limited-memory
    /// quasi-Newton minimizer which supports bounded optimization.
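    ///
    /// # Example
    ///
    /// A sketch with illustrative starting values, no bounds, and default options:
    ///
    /// ```ignore
    /// let status = nll.minimize(&[1.0, 0.0], None, None)?;
    /// println!("best-fit parameters: {}", status.x);
    /// ```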
    pub fn minimize(
        &self,
        p0: &[Float],
        bounds: Option<Vec<(Float, Float)>>,
        options: Option<MinimizerOptions>,
    ) -> Result<Status, LadduError> {
        let options = options.unwrap_or_default();
        let mut m = Minimizer::new(options.algorithm, self.parameters().len())
            .with_bounds(bounds)
            .with_max_steps(options.max_steps);
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        #[cfg(feature = "rayon")]
        {
            m.minimize(
                self,
                p0,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.minimize(self, p0, &mut ())?;
        }
        Ok(m.status)
    }
    /// Perform Markov Chain Monte Carlo sampling on this [`NLL`]. By default, this uses the [`ESS`](`ganesh::algorithms::mcmc::ESS`) sampler.
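    ///
    /// # Example
    ///
    /// A sketch with a small ensemble of illustrative walker positions:
    ///
    /// ```ignore
    /// let rng = fastrand::Rng::new();
    /// let walkers = vec![
    ///     DVector::from_vec(vec![1.0, 0.0]),
    ///     DVector::from_vec(vec![1.1, 0.1]),
    ///     DVector::from_vec(vec![0.9, -0.1]),
    /// ];
    /// let ensemble = nll.mcmc(&walkers, 1000, None, rng)?;
    /// ```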
    pub fn mcmc<T: AsRef<[DVector<Float>]>>(
        &self,
        p0: T,
        n_steps: usize,
        options: Option<MCMCOptions>,
        rng: Rng,
    ) -> Result<Ensemble, LadduError> {
        let options = options.unwrap_or(MCMCOptions::new_ess(
            [ESSMove::differential(0.9), ESSMove::gaussian(0.1)],
            rng,
        ));
        let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
        for observer in options.observers {
            m = m.with_observer(observer);
        }
        let func = LogLikelihood(self);
        #[cfg(feature = "rayon")]
        {
            m.sample(
                &func,
                &mut ThreadPoolBuilder::new()
                    .num_threads(options.threads)
                    .build()
                    .map_err(LadduError::from)?,
                n_steps,
            )?;
        }
        #[cfg(not(feature = "rayon"))]
        {
            m.sample(&func, &mut (), n_steps)?;
        }
        Ok(m.ensemble)
    }
}

/// An (extended) negative log-likelihood evaluator
///
/// Parameters
/// ----------
/// model : Model
///     The Model to evaluate
/// ds_data : Dataset
///     A Dataset representing true signal data
/// ds_accmc : Dataset
///     A Dataset of physically flat accepted Monte Carlo data used for normalization
///
#[cfg(feature = "python")]
#[pyclass(name = "NLL", module = "laddu")]
#[derive(Clone)]
pub struct PyNLL(pub Box<NLL>);

#[cfg(feature = "python")]
#[pymethods]
impl PyNLL {
    #[new]
    #[pyo3(signature = (model, ds_data, ds_accmc))]
    fn new(model: &PyModel, ds_data: &PyDataset, ds_accmc: &PyDataset) -> Self {
        Self(NLL::new(&model.0, &ds_data.0, &ds_accmc.0))
    }
    /// The underlying signal dataset used in calculating the NLL
    ///
    /// Returns
    /// -------
    /// Dataset
    ///
    #[getter]
    fn data(&self) -> PyDataset {
        PyDataset(self.0.data_evaluator.dataset.clone())
    }
    /// The underlying accepted Monte Carlo dataset used in calculating the NLL
    ///
    /// Returns
    /// -------
    /// Dataset
    ///
    #[getter]
    fn accmc(&self) -> PyDataset {
        PyDataset(self.0.accmc_evaluator.dataset.clone())
    }
    /// Turn an ``NLL`` into a term that can be used by a ``LikelihoodManager``
    ///
    /// Returns
    /// -------
    /// term : LikelihoodTerm
    ///     The isolated NLL which can be used to build more complex models
    ///
    fn as_term(&self) -> PyLikelihoodTerm {
        PyLikelihoodTerm(self.0.clone())
    }
    /// The names of the free parameters used to evaluate the NLL
    ///
    /// Returns
    /// -------
    /// parameters : list of str
    ///
    #[getter]
    fn parameters(&self) -> Vec<String> {
        self.0.parameters()
    }
    /// Activates Amplitudes in the NLL by name
    ///
    /// Parameters
    /// ----------
    /// arg : str or list of str
    ///     Names of Amplitudes to be activated
    ///
    /// Raises
    /// ------
    /// TypeError
    ///     If `arg` is not a str or list of str
    /// ValueError
    ///     If `arg` or any items of `arg` are not registered Amplitudes
    ///
    fn activate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.activate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.activate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    /// Activates all Amplitudes in the NLL
    ///
    fn activate_all(&self) {
        self.0.activate_all();
    }
    /// Deactivates Amplitudes in the NLL by name
    ///
    /// Deactivated Amplitudes act as zeros in the NLL
    ///
    /// Parameters
    /// ----------
    /// arg : str or list of str
    ///     Names of Amplitudes to be deactivated
    ///
    /// Raises
    /// ------
    /// TypeError
    ///     If `arg` is not a str or list of str
    /// ValueError
    ///     If `arg` or any items of `arg` are not registered Amplitudes
    ///
    fn deactivate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.deactivate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.deactivate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    /// Deactivates all Amplitudes in the NLL
    ///
    fn deactivate_all(&self) {
        self.0.deactivate_all();
    }
    /// Isolates Amplitudes in the NLL by name
    ///
    /// Activates the Amplitudes given in `arg` and deactivates the rest
    ///
    /// Parameters
    /// ----------
    /// arg : str or list of str
    ///     Names of Amplitudes to be isolated
    ///
    /// Raises
    /// ------
    /// TypeError
    ///     If `arg` is not a str or list of str
    /// ValueError
    ///     If `arg` or any items of `arg` are not registered Amplitudes
    ///
    fn isolate(&self, arg: &Bound<'_, PyAny>) -> PyResult<()> {
        if let Ok(string_arg) = arg.extract::<String>() {
            self.0.isolate(&string_arg)?;
        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
            let vec: Vec<String> = list_arg.extract()?;
            self.0.isolate_many(&vec)?;
        } else {
            return Err(PyTypeError::new_err(
                "Argument must be either a string or a list of strings",
            ));
        }
        Ok(())
    }
    /// Evaluate the extended negative log-likelihood over the stored Datasets
    ///
    /// This is defined as
    ///
    /// .. math:: NLL(\vec{p}; D, MC) = -2 \left( \sum_{e \in D} (e_w \log(\mathcal{L}(e))) - \frac{1}{N_{MC}} \sum_{e \in MC} (e_w \mathcal{L}(e)) \right)
    ///
    /// Parameters
    /// ----------
    /// parameters : list of float
    ///     The values to use for the free parameters
    /// threads : int, optional
    ///     The number of threads to use (setting this to None will use all available CPUs)
    ///
    /// Returns
    /// -------
    /// result : float
    ///     The total negative log-likelihood
    ///
    /// Raises
    /// ------
    /// Exception
    ///     If there was an error building the thread pool
    ///
    #[pyo3(signature = (parameters, *, threads=None))]
    fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
        #[cfg(feature = "rayon")]
        {
            Ok(ThreadPoolBuilder::new()
                .num_threads(threads.unwrap_or_else(num_cpus::get))
                .build()
                .map_err(LadduError::from)?
                .install(|| LikelihoodTerm::evaluate(self.0.as_ref(), &parameters)))
        }
        #[cfg(not(feature = "rayon"))]
        {
            Ok(LikelihoodTerm::evaluate(self.0.as_ref(), &parameters))
        }
    }
1318    /// Evaluate the gradient of the negative log-likelihood over the stored Dataset
1319    ///
1320    /// Parameters
1321    /// ----------
1322    /// parameters : list of float
1323    ///     The values to use for the free parameters
1324    /// threads : int, optional
1325    ///     The number of threads to use (setting this to None will use all available CPUs)
1326    ///
1327    /// Returns
1328    /// -------
1329    /// result : array_like
    ///     A ``numpy`` array representing the gradient of the negative log-likelihood over each parameter
1331    ///
1332    /// Raises
1333    /// ------
1334    /// Exception
1335    ///     If there was an error building the thread pool or problem creating the resulting
1336    ///     ``numpy`` array
1337    ///
1338    #[pyo3(signature = (parameters, *, threads=None))]
1339    fn evaluate_gradient<'py>(
1340        &self,
1341        py: Python<'py>,
1342        parameters: Vec<Float>,
1343        threads: Option<usize>,
1344    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
1345        #[cfg(feature = "rayon")]
1346        {
1347            Ok(PyArray1::from_slice(
1348                py,
1349                ThreadPoolBuilder::new()
1350                    .num_threads(threads.unwrap_or_else(num_cpus::get))
1351                    .build()
1352                    .map_err(LadduError::from)?
1353                    .install(|| self.0.evaluate_gradient(&parameters))
1354                    .as_slice(),
1355            ))
1356        }
1357        #[cfg(not(feature = "rayon"))]
1358        {
1359            Ok(PyArray1::from_slice(
1360                py,
1361                self.0.evaluate_gradient(&parameters).as_slice(),
1362            ))
1363        }
1364    }
1365    /// Project the model over the Monte Carlo dataset with the given parameter values
1366    ///
1367    /// This is defined as
1368    ///
1369    /// .. math:: e_w(\vec{p}) = \frac{e_w}{N_{MC}} \mathcal{L}(e)
1370    ///
1371    /// Parameters
1372    /// ----------
1373    /// parameters : list of float
1374    ///     The values to use for the free parameters
1375    /// mc_evaluator: Evaluator, optional
1376    ///     Project using the given Evaluator or use the stored ``accmc`` if None
1377    /// threads : int, optional
1378    ///     The number of threads to use (setting this to None will use all available CPUs)
1379    ///
1380    /// Returns
1381    /// -------
1382    /// result : array_like
1383    ///     Weights for every Monte Carlo event which represent the fit to data
1384    ///
1385    /// Raises
1386    /// ------
1387    /// Exception
1388    ///     If there was an error building the thread pool or problem creating the resulting
1389    ///     ``numpy`` array
1390    ///
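    /// Examples
    /// --------
    /// A hypothetical sketch (assumes ``nll`` is an existing ``NLL`` and ``fit_params`` holds
    /// fitted parameter values, e.g. the best position from a previous ``minimize`` call):
    ///
    /// .. code-block:: python
    ///
    ///     weights = nll.project(fit_params)  # one weight per accepted Monte Carlo event
    ///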
1391    #[pyo3(signature = (parameters, *, mc_evaluator = None, threads=None))]
1392    fn project<'py>(
1393        &self,
1394        py: Python<'py>,
1395        parameters: Vec<Float>,
1396        mc_evaluator: Option<PyEvaluator>,
1397        threads: Option<usize>,
1398    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
1399        #[cfg(feature = "rayon")]
1400        {
1401            Ok(PyArray1::from_slice(
1402                py,
1403                ThreadPoolBuilder::new()
1404                    .num_threads(threads.unwrap_or_else(num_cpus::get))
1405                    .build()
1406                    .map_err(LadduError::from)?
1407                    .install(|| {
1408                        self.0
1409                            .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
1410                    })
1411                    .as_slice(),
1412            ))
1413        }
1414        #[cfg(not(feature = "rayon"))]
1415        {
1416            Ok(PyArray1::from_slice(
1417                py,
1418                self.0
1419                    .project(&parameters, mc_evaluator.map(|pyeval| pyeval.0.clone()))
1420                    .as_slice(),
1421            ))
1422        }
1423    }
1424
1425    /// Project the model over the Monte Carlo dataset with the given parameter values, first
    /// isolating the given Amplitudes by name. The NLL is then reset to its previous state of
1427    /// activation.
1428    ///
1429    /// This is defined as
1430    ///
1431    /// .. math:: e_w(\vec{p}) = \frac{e_w}{N_{MC}} \mathcal{L}(e)
1432    ///
1433    /// Parameters
1434    /// ----------
1435    /// parameters : list of float
1436    ///     The values to use for the free parameters
1437    /// arg : str or list of str
1438    ///     Names of Amplitudes to be isolated
1439    /// mc_evaluator: Evaluator, optional
1440    ///     Project using the given Evaluator or use the stored ``accmc`` if None
1441    /// threads : int, optional
1442    ///     The number of threads to use (setting this to None will use all available CPUs)
1443    ///
1444    /// Returns
1445    /// -------
1446    /// result : array_like
1447    ///     Weights for every Monte Carlo event which represent the fit to data
1448    ///
    /// Raises
    /// ------
    /// TypeError
    ///     If `arg` is not a str or list of str
    /// ValueError
    ///     If `arg` or any items of `arg` are not registered Amplitudes
    /// Exception
    ///     If there was an error building the thread pool or problem creating the resulting
    ///     ``numpy`` array
    ///
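    /// Examples
    /// --------
    /// A hypothetical sketch (``nll`` and ``fit_params`` as in ``NLL.project``; the Amplitude
    /// name ``"S0+"`` is illustrative):
    ///
    /// .. code-block:: python
    ///
    ///     s_wave = nll.project_with(fit_params, "S0+")  # weights from "S0+" alone
    ///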
1462    #[pyo3(signature = (parameters, arg, *, mc_evaluator = None, threads=None))]
1463    fn project_with<'py>(
1464        &self,
1465        py: Python<'py>,
1466        parameters: Vec<Float>,
1467        arg: &Bound<'_, PyAny>,
1468        mc_evaluator: Option<PyEvaluator>,
1469        threads: Option<usize>,
1470    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
1471        let names = if let Ok(string_arg) = arg.extract::<String>() {
1472            vec![string_arg]
1473        } else if let Ok(list_arg) = arg.downcast::<PyList>() {
1474            let vec: Vec<String> = list_arg.extract()?;
1475            vec
1476        } else {
1477            return Err(PyTypeError::new_err(
1478                "Argument must be either a string or a list of strings",
1479            ));
1480        };
1481        #[cfg(feature = "rayon")]
1482        {
1483            Ok(PyArray1::from_slice(
1484                py,
1485                ThreadPoolBuilder::new()
1486                    .num_threads(threads.unwrap_or_else(num_cpus::get))
1487                    .build()
1488                    .map_err(LadduError::from)?
1489                    .install(|| {
1490                        self.0.project_with(
1491                            &parameters,
1492                            &names,
1493                            mc_evaluator.map(|pyeval| pyeval.0.clone()),
1494                        )
1495                    })?
1496                    .as_slice(),
1497            ))
1498        }
1499        #[cfg(not(feature = "rayon"))]
1500        {
1501            Ok(PyArray1::from_slice(
1502                py,
1503                self.0
1504                    .project_with(
1505                        &parameters,
1506                        &names,
1507                        mc_evaluator.map(|pyeval| pyeval.0.clone()),
1508                    )?
1509                    .as_slice(),
1510            ))
1511        }
1512    }
1513
1514    /// Minimize the NLL with respect to the free parameters in the model
1515    ///
1516    /// This method "runs the fit". Given an initial position `p0` and optional `bounds`, this
1517    /// method performs a minimization over the negative log-likelihood, optimizing the model
1518    /// over the stored signal data and Monte Carlo.
1519    ///
1520    /// Parameters
1521    /// ----------
1522    /// p0 : array_like
1523    ///     The initial parameters at the start of optimization
1524    /// bounds : list of tuple of float, optional
1525    ///     A list of lower and upper bound pairs for each parameter (use ``None`` for no bound)
1526    /// method : {'lbfgsb', 'nelder-mead', 'nelder_mead'}
1527    ///     The minimization algorithm to use (see additional parameters for fine-tuning)
1528    /// max_steps : int, default=4000
1529    ///     The maximum number of algorithm steps to perform
1530    /// debug : bool, default=False
1531    ///     Set to ``True`` to print out debugging information at each step
1532    /// verbose : bool, default=False
1533    ///     Set to ``True`` to print verbose information at each step
1534    /// show_step : bool, default=True
1535    ///     Include step number in verbose output
1536    /// show_x : bool, default=True
1537    ///     Include current best position in verbose output
1538    /// show_fx : bool, default=True
1539    ///     Include current best NLL in verbose output
1540    /// observers : Observer or list of Observers
1541    ///     Callback functions which are applied after every algorithm step
1542    /// tol_x_rel : float
1543    ///     The relative position tolerance used by termination methods (default is machine
1544    ///     epsilon)
1545    /// tol_x_abs : float
1546    ///     The absolute position tolerance used by termination methods (default is machine
1547    ///     epsilon)
1548    /// tol_f_rel : float
1549    ///     The relative function tolerance used by termination methods (default is machine
1550    ///     epsilon)
1551    /// tol_f_abs : float
1552    ///     The absolute function tolerance used by termination methods (default is machine
1553    ///     epsilon)
1554    /// tol_g_abs : float
1555    ///     The absolute gradient tolerance used by termination methods (default is the cube
1556    ///     root of machine epsilon)
1557    /// g_tolerance : float, default=1e-5
    ///     An additional gradient tolerance used by termination methods (particularly L-BFGS-B)
1559    /// adaptive : bool, default=False
1560    ///     Use adaptive values for Nelder-Mead parameters
1561    /// alpha : float, optional
1562    ///     Overwrite the default :math:`\alpha` parameter in the Nelder-Mead algorithm
1563    /// beta : float, optional
1564    ///     Overwrite the default :math:`\beta` parameter in the Nelder-Mead algorithm
1565    /// gamma : float, optional
1566    ///     Overwrite the default :math:`\gamma` parameter in the Nelder-Mead algorithm
1567    /// delta : float, optional
1568    ///     Overwrite the default :math:`\delta` parameter in the Nelder-Mead algorithm
1569    /// simplex_expansion_method : {'greedy_minimization', 'greedy_expansion'}
1570    ///     The expansion method used by the Nelder-Mead algorithm
    /// nelder_mead_f_terminator : {'stddev', 'absolute', 'none'}
1572    ///     The function terminator used by the Nelder-Mead algorithm
1573    /// nelder_mead_x_terminator : {'singer', 'diameter', 'rowan', 'higham', 'none'}
1574    ///     The positional terminator used by the Nelder-Mead algorithm
    /// skip_hessian : bool, default=False
1576    ///     Skip calculation of the Hessian matrix (and parameter errors) at the termination of the
1577    ///     algorithm (use this when uncertainties are not needed/important)
1578    /// threads : int, optional
1579    ///     The number of threads to use (setting this to None will use all available CPUs)
1580    ///
1581    /// Returns
1582    /// -------
1583    /// Status
1584    ///     The status of the minimization algorithm at termination
1585    ///
1586    /// Raises
1587    /// ------
1588    /// Exception
1589    ///     If there was an error building the thread pool
1590    /// ValueError
1591    ///     If any kwargs are invalid
1592    ///
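    /// Examples
    /// --------
    /// A minimal sketch (assumes ``nll`` is an existing ``NLL`` with two free parameters; the
    /// starting point and bounds are illustrative):
    ///
    /// .. code-block:: python
    ///
    ///     status = nll.minimize([0.5, 0.5], bounds=[(0.0, None), (None, None)])
    ///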
1593    #[pyo3(signature = (p0, *, bounds=None, method="lbfgsb", max_steps=4000, debug=false, verbose=false, **kwargs))]
1594    #[allow(clippy::too_many_arguments)]
1595    fn minimize(
1596        &self,
1597        p0: Vec<Float>,
1598        bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
1599        method: &str,
1600        max_steps: usize,
1601        debug: bool,
1602        verbose: bool,
1603        kwargs: Option<&Bound<'_, PyDict>>,
1604    ) -> PyResult<PyStatus> {
1605        let bounds = bounds.map(|bounds_vec| {
1606            bounds_vec
1607                .iter()
1608                .map(|(opt_lb, opt_ub)| {
1609                    (
1610                        opt_lb.unwrap_or(Float::NEG_INFINITY),
1611                        opt_ub.unwrap_or(Float::INFINITY),
1612                    )
1613                })
1614                .collect()
1615        });
1616        let n_parameters = p0.len();
1617        let options =
1618            py_parse_minimizer_options(n_parameters, method, max_steps, debug, verbose, kwargs)?;
1619        let status = self.0.minimize(&p0, bounds, Some(options))?;
1620        Ok(PyStatus(status))
1621    }
1622    /// Run an MCMC algorithm on the free parameters of the NLL's model
1623    ///
1624    /// This method can be used to sample the underlying log-likelihood given an initial
1625    /// position for each walker `p0`.
1626    ///
1627    /// Parameters
1628    /// ----------
1629    /// p0 : array_like
1630    ///     The initial parameters at the start of optimization
    /// n_steps : int
1632    ///     The number of MCMC steps each walker should take
1633    /// method : {'ESS', 'AIES'}
1634    ///     The MCMC algorithm to use (see additional parameters for fine-tuning)
1635    /// debug : bool, default=False
1636    ///     Set to ``True`` to print out debugging information at each step
1637    /// verbose : bool, default=False
1638    ///     Set to ``True`` to print verbose information at each step
    /// seed : int
1640    ///     The seed for the random number generator
1641    /// ess_moves : list of tuple
1642    ///     A list of moves for the ESS algorithm (see notes)
1643    /// aies_moves : list of tuple
1644    ///     A list of moves for the AIES algorithm (see notes)
1645    /// n_adaptive : int, default=100
1646    ///     Number of adaptive ESS steps to perform at the start of sampling
1647    /// mu : float, default=1.0
1648    ///     ESS adaptive parameter
1649    /// max_ess_steps : int, default=10000
1650    ///     The maximum number of slice expansions/contractions performed in the ESS algorithm
1651    /// threads : int, optional
1652    ///     The number of threads to use (setting this to None will use all available CPUs)
1653    ///
1654    /// Returns
1655    /// -------
1656    /// Ensemble
1657    ///     The resulting ensemble of walkers
1658    ///
1659    /// Raises
1660    /// ------
1661    /// Exception
1662    ///     If there was an error building the thread pool
1663    /// ValueError
1664    ///     If any kwargs are invalid
1665    ///
1666    /// Notes
1667    /// -----
1668    /// Moves may be specified as tuples of ``(move name, usage weight)`` where the move name
1669    /// depends on the algorithm and the usage weight gives the proportion of time that move is
1670    /// used relative to the others in the list.
1671    ///
1672    /// For the Ensemble Slice Sampler (ESS) algorithm, valid move types are "differential" and
1673    /// "gaussian", and the default move set is ``[("differential", 0.9), ("gaussian", 0.1)]``.
1674    ///
1675    /// For the Affine Invariant Ensemble Sampler (AIES) algorithm, valid move types are
1676    /// "stretch" and "walk", and the default move set is ``[("stretch", 0.9), ("walk", 0.1)]``.
1677    ///
1678    /// For AIES, the "stretch" move can also be given with an adaptive parameter ``a``
1679    /// (default=``2``). To add a stretch move with a different value of ``a``, the "move name"
1680    /// can be instead given as a tuple ``(move name, a)``. For example, ``(("stretch", 2.2), 0.3)``
1681    /// creates a stretch move with ``a=2.2`` and usage weight of ``0.3``.
1682    ///
1683    /// Since MCMC methods are inclined to sample maxima rather than minima, the underlying
1684    /// function sign is automatically flipped when calling this method.
1685    ///
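    /// Examples
    /// --------
    /// A minimal sketch (assumes ``nll`` is an existing ``NLL`` with two free parameters;
    /// five walkers take 1000 steps using a custom AIES move set):
    ///
    /// .. code-block:: python
    ///
    ///     p0 = [[0.1, 0.2], [0.2, 0.1], [0.3, 0.3], [0.1, 0.3], [0.2, 0.2]]
    ///     ensemble = nll.mcmc(
    ///         p0, 1000, method="AIES", seed=42,
    ///         aies_moves=[(("stretch", 2.2), 0.7), ("walk", 0.3)],
    ///     )
    ///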
1686    #[pyo3(signature = (p0, n_steps, *, method="ESS", debug=false, verbose=false, seed=0, **kwargs))]
1687    #[allow(clippy::too_many_arguments)]
1688    fn mcmc(
1689        &self,
1690        p0: Vec<Vec<Float>>,
1691        n_steps: usize,
1692        method: &str,
1693        debug: bool,
1694        verbose: bool,
1695        seed: u64,
1696        kwargs: Option<&Bound<'_, PyDict>>,
1697    ) -> PyResult<PyEnsemble> {
1698        let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
1699        let mut rng = Rng::new();
1700        rng.seed(seed);
1701        let options = py_parse_mcmc_options(method, debug, verbose, kwargs, rng.clone())?;
1702        let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
1703        Ok(PyEnsemble(ensemble))
1704    }
1705}
1706
1707/// An identifier that can be used like an [`AmplitudeID`](`laddu_core::amplitudes::AmplitudeID`) to combine registered
1708/// [`LikelihoodTerm`]s.
1709#[derive(Clone, Debug)]
1710pub struct LikelihoodID(usize);
1711
1712impl Display for LikelihoodID {
1713    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1714        write!(f, "{}", self.0)
1715    }
1716}
1717
1718/// An object which holds a registered ``LikelihoodTerm``
1719///
1720/// See Also
1721/// --------
1722/// laddu.LikelihoodManager.register
1723///
1724#[cfg(feature = "python")]
1725#[pyclass(name = "LikelihoodID", module = "laddu")]
1726#[derive(Clone)]
1727pub struct PyLikelihoodID(LikelihoodID);
1728
1729#[cfg(feature = "python")]
1730#[pymethods]
1731impl PyLikelihoodID {
1732    fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
1733        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
1734            Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
1735        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
1736            Ok(PyLikelihoodExpression(
1737                self.0.clone() + other_expr.0.clone(),
1738            ))
1739        } else if let Ok(int) = other.extract::<usize>() {
1740            if int == 0 {
1741                Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
1742                    self.0.clone(),
1743                )))
1744            } else {
1745                Err(PyTypeError::new_err(
1746                    "Addition with an integer for this type is only defined for 0",
1747                ))
1748            }
1749        } else {
1750            Err(PyTypeError::new_err("Unsupported operand type for +"))
1751        }
1752    }
1753    fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
1754        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
1755            Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
1756        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
1757            Ok(PyLikelihoodExpression(
1758                other_expr.0.clone() + self.0.clone(),
1759            ))
1760        } else if let Ok(int) = other.extract::<usize>() {
1761            if int == 0 {
1762                Ok(PyLikelihoodExpression(LikelihoodExpression::Term(
1763                    self.0.clone(),
1764                )))
1765            } else {
1766                Err(PyTypeError::new_err(
1767                    "Addition with an integer for this type is only defined for 0",
1768                ))
1769            }
1770        } else {
1771            Err(PyTypeError::new_err("Unsupported operand type for +"))
1772        }
1773    }
1774    fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
1775        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
1776            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
1777        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
1778            Ok(PyLikelihoodExpression(
1779                self.0.clone() * other_expr.0.clone(),
1780            ))
1781        } else {
1782            Err(PyTypeError::new_err("Unsupported operand type for *"))
1783        }
1784    }
1785    fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
1786        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
1787            Ok(PyLikelihoodExpression(other_aid.0.clone() * self.0.clone()))
1788        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
1789            Ok(PyLikelihoodExpression(
1790                other_expr.0.clone() * self.0.clone(),
1791            ))
1792        } else {
1793            Err(PyTypeError::new_err("Unsupported operand type for *"))
1794        }
1795    }
1796    fn __str__(&self) -> String {
1797        format!("{}", self.0)
1798    }
1799    fn __repr__(&self) -> String {
1800        format!("{:?}", self.0)
1801    }
1802}
1803
1804/// A [`Manager`](`laddu_core::Manager`) but for [`LikelihoodTerm`]s.
1805#[derive(Default, Clone)]
1806pub struct LikelihoodManager {
1807    terms: Vec<Box<dyn LikelihoodTerm>>,
1808    param_name_to_index: HashMap<String, usize>,
1809    param_names: Vec<String>,
1810    param_layouts: Vec<Vec<usize>>,
1811    param_counts: Vec<usize>,
1812}
1813
1814impl LikelihoodManager {
1815    /// Register a [`LikelihoodTerm`] to get a [`LikelihoodID`] which can be combined with others
1816    /// to form [`LikelihoodExpression`]s which can be minimized.
1817    pub fn register(&mut self, term: Box<dyn LikelihoodTerm>) -> LikelihoodID {
1818        let term_idx = self.terms.len();
1819        for param_name in term.parameters() {
1820            if !self.param_name_to_index.contains_key(&param_name) {
1821                self.param_name_to_index
1822                    .insert(param_name.clone(), self.param_name_to_index.len());
1823                self.param_names.push(param_name.clone());
1824            }
1825        }
1826        let param_layout: Vec<usize> = term
1827            .parameters()
1828            .iter()
1829            .map(|name| self.param_name_to_index[name])
1830            .collect();
1831        let param_count = term.parameters().len();
1832        self.param_layouts.push(param_layout);
1833        self.param_counts.push(param_count);
1834        self.terms.push(term.clone());
1835
1836        LikelihoodID(term_idx)
1837    }
1838
1839    /// Return all of the parameter names of registered [`LikelihoodTerm`]s in order. This only
1840    /// returns the unique names in the order they should be input when evaluated with a
1841    /// [`LikelihoodEvaluator`].
1842    pub fn parameters(&self) -> Vec<String> {
1843        self.param_names.clone()
1844    }
1845
1846    /// Load a [`LikelihoodExpression`] to generate a [`LikelihoodEvaluator`] that can be
1847    /// minimized.
1848    pub fn load(&self, likelihood_expression: &LikelihoodExpression) -> LikelihoodEvaluator {
1849        LikelihoodEvaluator {
1850            likelihood_manager: self.clone(),
1851            likelihood_expression: likelihood_expression.clone(),
1852        }
1853    }
1854}
1855
1856/// A class which can be used to register LikelihoodTerms and store precalculated data
1857///
1858#[cfg(feature = "python")]
1859#[pyclass(name = "LikelihoodManager", module = "laddu")]
1860#[derive(Clone)]
1861pub struct PyLikelihoodManager(LikelihoodManager);
1862
1863#[cfg(feature = "python")]
1864#[pymethods]
1865impl PyLikelihoodManager {
1866    #[new]
1867    fn new() -> Self {
1868        Self(LikelihoodManager::default())
1869    }
1870    /// Register a LikelihoodTerm with the LikelihoodManager
1871    ///
1872    /// Parameters
1873    /// ----------
    /// likelihood_term : LikelihoodTerm
1875    ///     The LikelihoodTerm to register
1876    ///
1877    /// Returns
1878    /// -------
1879    /// LikelihoodID
    ///     A reference to the registered LikelihoodTerm that can be used to form complex
1881    ///     LikelihoodExpressions
1882    ///
1883    fn register(&mut self, likelihood_term: &PyLikelihoodTerm) -> PyLikelihoodID {
1884        PyLikelihoodID(self.0.register(likelihood_term.0.clone()))
1885    }
1886    /// The free parameters used by all terms in the LikelihoodManager
1887    ///
1888    /// Returns
1889    /// -------
1890    /// parameters : list of str
1891    ///     The list of parameter names
1892    ///
1893    fn parameters(&self) -> Vec<String> {
1894        self.0.parameters()
1895    }
    /// Load a LikelihoodExpression by precalculating each term over its internal Datasets
1897    ///
1898    /// Parameters
1899    /// ----------
1900    /// likelihood_expression : LikelihoodExpression
1901    ///     The expression to use in precalculation
1902    ///
1903    /// Returns
1904    /// -------
1905    /// LikelihoodEvaluator
1906    ///     An object that can be used to evaluate the `likelihood_expression` over all managed
1907    ///     terms
1908    ///
1909    /// Notes
1910    /// -----
    /// While the given `likelihood_expression` will be the one evaluated in the end, all registered
    /// LikelihoodTerms will be loaded, and all of their parameters will be included in the final
    /// parameter list. Parameters belonging to terms absent from the expression have no effect on
    /// evaluation, but they must still be included in function calls.
1915    ///
1916    /// See Also
1917    /// --------
1918    /// LikelihoodManager.parameters
1919    ///
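    /// Examples
    /// --------
    /// A hypothetical sketch (``term_a`` and ``term_b`` are assumed to be existing
    /// LikelihoodTerms, e.g. obtained from ``NLL.as_term``):
    ///
    /// .. code-block:: python
    ///
    ///     manager = LikelihoodManager()
    ///     lid_a = manager.register(term_a)
    ///     lid_b = manager.register(term_b)
    ///     evaluator = manager.load(lid_a + lid_b)
    ///     nll_value = evaluator.evaluate([1.0] * len(evaluator.parameters))
    ///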
1920    fn load(&self, likelihood_expression: &PyLikelihoodExpression) -> PyLikelihoodEvaluator {
1921        PyLikelihoodEvaluator(self.0.load(&likelihood_expression.0))
1922    }
1923}
1924
1925#[derive(Debug)]
1926struct LikelihoodValues(Vec<Float>);
1927
1928#[derive(Debug)]
1929struct LikelihoodGradients(Vec<DVector<Float>>);
1930
1931/// A combination of [`LikelihoodTerm`]s as well as sums and products of them.
1932#[derive(Clone)]
1933pub enum LikelihoodExpression {
    /// A registered [`LikelihoodTerm`] referenced by a [`LikelihoodID`].
1935    Term(LikelihoodID),
1936    /// The sum of two [`LikelihoodExpression`]s.
1937    Add(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
1938    /// The product of two [`LikelihoodExpression`]s.
1939    Mul(Box<LikelihoodExpression>, Box<LikelihoodExpression>),
1940}
1941
1942impl Debug for LikelihoodExpression {
1943    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1944        self.write_tree(f, "", "", "")
1945    }
1946}
1947
1948impl Display for LikelihoodExpression {
1949    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1950        write!(f, "{:?}", self)
1951    }
1952}
1953
1954impl LikelihoodExpression {
1955    fn evaluate(&self, likelihood_values: &LikelihoodValues) -> Float {
1956        match self {
1957            LikelihoodExpression::Term(lid) => likelihood_values.0[lid.0],
1958            LikelihoodExpression::Add(a, b) => {
1959                a.evaluate(likelihood_values) + b.evaluate(likelihood_values)
1960            }
1961            LikelihoodExpression::Mul(a, b) => {
1962                a.evaluate(likelihood_values) * b.evaluate(likelihood_values)
1963            }
1964        }
1965    }
1966    fn evaluate_gradient(
1967        &self,
1968        likelihood_values: &LikelihoodValues,
1969        likelihood_gradients: &LikelihoodGradients,
1970    ) -> DVector<Float> {
1971        match self {
1972            LikelihoodExpression::Term(lid) => likelihood_gradients.0[lid.0].clone(),
1973            LikelihoodExpression::Add(a, b) => {
1974                a.evaluate_gradient(likelihood_values, likelihood_gradients)
1975                    + b.evaluate_gradient(likelihood_values, likelihood_gradients)
1976            }
1977            LikelihoodExpression::Mul(a, b) => {
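                // Product rule: grad(a * b) = grad(a) * b + grad(b) * a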
1978                a.evaluate_gradient(likelihood_values, likelihood_gradients)
1979                    * b.evaluate(likelihood_values)
1980                    + b.evaluate_gradient(likelihood_values, likelihood_gradients)
1981                        * a.evaluate(likelihood_values)
1982            }
1983        }
1984    }
1985    /// Credit to Daniel Janus: <https://blog.danieljanus.pl/2023/07/20/iterating-trees/>
1986    fn write_tree(
1987        &self,
1988        f: &mut std::fmt::Formatter<'_>,
1989        parent_prefix: &str,
1990        immediate_prefix: &str,
1991        parent_suffix: &str,
1992    ) -> std::fmt::Result {
1993        let display_string = match self {
1994            // TODO: maybe come up with a way to name likelihood terms?
1995            Self::Term(lid) => format!("{}", lid.0),
1996            Self::Add(_, _) => "+".to_string(),
1997            Self::Mul(_, _) => "*".to_string(),
1998        };
1999        writeln!(f, "{}{}{}", parent_prefix, immediate_prefix, display_string)?;
2000        match self {
2001            Self::Term(_) => {}
2002            Self::Add(a, b) | Self::Mul(a, b) => {
2003                let terms = [a, b];
2004                let mut it = terms.iter().peekable();
2005                let child_prefix = format!("{}{}", parent_prefix, parent_suffix);
2006                while let Some(child) = it.next() {
2007                    match it.peek() {
2008                        Some(_) => child.write_tree(f, &child_prefix, "├─ ", "│  "),
2009                        None => child.write_tree(f, &child_prefix, "└─ ", "   "),
2010                    }?;
2011                }
2012            }
2013        }
2014        Ok(())
2015    }
2016}
2017
2018impl_op_ex!(+ |a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(a.clone()), Box::new(b.clone()))});
2019impl_op_ex!(
2020    *|a: &LikelihoodExpression, b: &LikelihoodExpression| -> LikelihoodExpression {
2021        LikelihoodExpression::Mul(Box::new(a.clone()), Box::new(b.clone()))
2022    }
2023);
2024impl_op_ex_commutative!(+ |a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(b.clone()))});
2025impl_op_ex_commutative!(
2026    *|a: &LikelihoodID, b: &LikelihoodExpression| -> LikelihoodExpression {
2027        LikelihoodExpression::Mul(
2028            Box::new(LikelihoodExpression::Term(a.clone())),
2029            Box::new(b.clone()),
2030        )
2031    }
2032);
2033impl_op_ex!(+ |a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression { LikelihoodExpression::Add(Box::new(LikelihoodExpression::Term(a.clone())), Box::new(LikelihoodExpression::Term(b.clone())))});
2034impl_op_ex!(
2035    *|a: &LikelihoodID, b: &LikelihoodID| -> LikelihoodExpression {
2036        LikelihoodExpression::Mul(
2037            Box::new(LikelihoodExpression::Term(a.clone())),
2038            Box::new(LikelihoodExpression::Term(b.clone())),
2039        )
2040    }
2041);
2042
2043/// A mathematical expression formed from LikelihoodIDs
2044///
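/// Examples
/// --------
/// A hypothetical sketch (``lid_a``, ``lid_b``, and ``lid_scale`` are LikelihoodIDs returned
/// by ``LikelihoodManager.register``):
///
/// .. code-block:: python
///
///     expression = lid_scale * (lid_a + lid_b)  # scale the sum of two terms
///     total = sum([lid_a, lid_b])               # works because ``0 + term`` is defined
///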
2045#[cfg(feature = "python")]
2046#[pyclass(name = "LikelihoodExpression", module = "laddu")]
2047#[derive(Clone)]
2048pub struct PyLikelihoodExpression(LikelihoodExpression);
2049
2050#[cfg(feature = "python")]
2051#[pymethods]
2052impl PyLikelihoodExpression {
2053    fn __add__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
2054        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
2055            Ok(PyLikelihoodExpression(self.0.clone() + other_aid.0.clone()))
2056        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
2057            Ok(PyLikelihoodExpression(
2058                self.0.clone() + other_expr.0.clone(),
2059            ))
2060        } else if let Ok(int) = other.extract::<usize>() {
2061            if int == 0 {
2062                Ok(PyLikelihoodExpression(self.0.clone()))
2063            } else {
2064                Err(PyTypeError::new_err(
2065                    "Addition with an integer for this type is only defined for 0",
2066                ))
2067            }
2068        } else {
2069            Err(PyTypeError::new_err("Unsupported operand type for +"))
2070        }
2071    }
2072    fn __radd__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
2073        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
2074            Ok(PyLikelihoodExpression(other_aid.0.clone() + self.0.clone()))
2075        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
2076            Ok(PyLikelihoodExpression(
2077                other_expr.0.clone() + self.0.clone(),
2078            ))
2079        } else if let Ok(int) = other.extract::<usize>() {
2080            if int == 0 {
2081                Ok(PyLikelihoodExpression(self.0.clone()))
2082            } else {
2083                Err(PyTypeError::new_err(
2084                    "Addition with an integer for this type is only defined for 0",
2085                ))
2086            }
2087        } else {
2088            Err(PyTypeError::new_err("Unsupported operand type for +"))
2089        }
2090    }
2091    fn __mul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
2092        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
2093            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
2094        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
2095            Ok(PyLikelihoodExpression(
2096                self.0.clone() * other_expr.0.clone(),
2097            ))
2098        } else {
2099            Err(PyTypeError::new_err("Unsupported operand type for *"))
2100        }
2101    }
2102    fn __rmul__(&self, other: &Bound<'_, PyAny>) -> PyResult<PyLikelihoodExpression> {
2103        if let Ok(other_aid) = other.extract::<PyRef<PyLikelihoodID>>() {
2104            Ok(PyLikelihoodExpression(self.0.clone() * other_aid.0.clone()))
2105        } else if let Ok(other_expr) = other.extract::<PyLikelihoodExpression>() {
2106            Ok(PyLikelihoodExpression(
2107                self.0.clone() * other_expr.0.clone(),
2108            ))
2109        } else {
2110            Err(PyTypeError::new_err("Unsupported operand type for *"))
2111        }
2112    }
2113    fn __str__(&self) -> String {
2114        format!("{}", self.0)
2115    }
2116    fn __repr__(&self) -> String {
2117        format!("{:?}", self.0)
2118    }
2119}
2120
2121/// A structure to evaluate and minimize combinations of [`LikelihoodTerm`]s.
2122pub struct LikelihoodEvaluator {
2123    likelihood_manager: LikelihoodManager,
2124    likelihood_expression: LikelihoodExpression,
2125}
2126
2127#[cfg(feature = "rayon")]
2128impl Function<ThreadPool, LadduError> for LikelihoodEvaluator {
2129    fn evaluate(
2130        &self,
2131        parameters: &[Float],
2132        thread_pool: &mut ThreadPool,
2133    ) -> Result<Float, LadduError> {
2134        thread_pool.install(|| self.evaluate(parameters))
2135    }
2136    fn gradient(
2137        &self,
2138        parameters: &[Float],
2139        thread_pool: &mut ThreadPool,
2140    ) -> Result<DVector<Float>, LadduError> {
2141        thread_pool.install(|| self.evaluate_gradient(parameters))
2142    }
2143}
2144
2145#[cfg(not(feature = "rayon"))]
2146impl Function<(), LadduError> for LikelihoodEvaluator {
2147    fn evaluate(&self, parameters: &[Float], _user_data: &mut ()) -> Result<Float, LadduError> {
2148        self.evaluate(parameters)
2149    }
2150    fn gradient(
2151        &self,
2152        parameters: &[Float],
2153        _user_data: &mut (),
2154    ) -> Result<DVector<Float>, LadduError> {
2155        self.evaluate_gradient(parameters)
2156    }
2157}
2158
2159pub(crate) struct NegativeLikelihoodEvaluator<'a>(&'a LikelihoodEvaluator);
2160#[cfg(feature = "rayon")]
2161impl Function<ThreadPool, LadduError> for NegativeLikelihoodEvaluator<'_> {
2162    fn evaluate(
2163        &self,
2164        parameters: &[Float],
2165        thread_pool: &mut ThreadPool,
2166    ) -> Result<Float, LadduError> {
2167        Function::evaluate(self.0, parameters, thread_pool).map(|res| -res)
2168    }
2169    fn gradient(
2170        &self,
2171        parameters: &[Float],
2172        thread_pool: &mut ThreadPool,
2173    ) -> Result<DVector<Float>, LadduError> {
2174        Function::gradient(self.0, parameters, thread_pool).map(|res| -res)
2175    }
2176}
2177
2178#[cfg(not(feature = "rayon"))]
2179impl Function<(), LadduError> for NegativeLikelihoodEvaluator<'_> {
2180    fn evaluate(&self, parameters: &[Float], user_data: &mut ()) -> Result<Float, LadduError> {
2181        Function::evaluate(self.0, parameters, user_data).map(|res| -res)
2182    }
2183    fn gradient(
2184        &self,
2185        parameters: &[Float],
2186        user_data: &mut (),
2187    ) -> Result<DVector<Float>, LadduError> {
2188        Function::gradient(self.0, parameters, user_data).map(|res| -res)
2189    }
2190}
2191
2192impl LikelihoodEvaluator {
    /// The parameter names used as input to [`LikelihoodEvaluator::evaluate`], in order.
2194    pub fn parameters(&self) -> Vec<String> {
2195        self.likelihood_manager.parameters()
2196    }
2197    /// A function that can be called to evaluate the sum/product of the [`LikelihoodTerm`]s
2198    /// contained by this [`LikelihoodEvaluator`].
2199    pub fn evaluate(&self, parameters: &[Float]) -> Result<Float, LadduError> {
2200        let mut param_buffers: Vec<Vec<Float>> = self
2201            .likelihood_manager
2202            .param_counts
2203            .iter()
2204            .map(|&count| vec![0.0; count])
2205            .collect();
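        // Fill each term's local parameter buffer from the global parameter vector using
        // that term's parameter layout.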
2206        for (layout, buffer) in self
2207            .likelihood_manager
2208            .param_layouts
2209            .iter()
2210            .zip(param_buffers.iter_mut())
2211        {
2212            for (buffer_idx, &param_idx) in layout.iter().enumerate() {
2213                buffer[buffer_idx] = parameters[param_idx];
2214            }
2215        }
2216        let likelihood_values = LikelihoodValues(
2217            self.likelihood_manager
2218                .terms
2219                .iter()
2220                .zip(param_buffers.iter())
2221                .map(|(term, buffer)| term.evaluate(buffer))
2222                .collect(),
2223        );
2224        Ok(self.likelihood_expression.evaluate(&likelihood_values))
2225    }
2226
2227    /// Evaluate the gradient of the stored [`LikelihoodExpression`] over the events in the [`Dataset`]
2228    /// stored by the [`LikelihoodEvaluator`] with the given values for free parameters.
2229    pub fn evaluate_gradient(&self, parameters: &[Float]) -> Result<DVector<Float>, LadduError> {
2230        let mut param_buffers: Vec<Vec<Float>> = self
2231            .likelihood_manager
2232            .param_counts
2233            .iter()
2234            .map(|&count| vec![0.0; count])
2235            .collect();
2236        for (layout, buffer) in self
2237            .likelihood_manager
2238            .param_layouts
2239            .iter()
2240            .zip(param_buffers.iter_mut())
2241        {
2242            for (buffer_idx, &param_idx) in layout.iter().enumerate() {
2243                buffer[buffer_idx] = parameters[param_idx];
2244            }
2245        }
2246        let likelihood_values = LikelihoodValues(
2247            self.likelihood_manager
2248                .terms
2249                .iter()
2250                .zip(param_buffers.iter())
2251                .map(|(term, buffer)| term.evaluate(buffer))
2252                .collect(),
2253        );
2254        let mut gradient_buffers: Vec<DVector<Float>> = (0..self.likelihood_manager.terms.len())
2255            .map(|_| DVector::zeros(self.likelihood_manager.param_names.len()))
2256            .collect();
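        // Each term computes its gradient in its own local parameter order; scatter those
        // entries into global-length buffers using the term's layout.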
2257        for (((term, param_buffer), gradient_buffer), layout) in self
2258            .likelihood_manager
2259            .terms
2260            .iter()
2261            .zip(param_buffers.iter())
2262            .zip(gradient_buffers.iter_mut())
2263            .zip(self.likelihood_manager.param_layouts.iter())
2264        {
2265            let term_gradient = term.evaluate_gradient(param_buffer); // This has a local layout
2266            for (term_idx, &buffer_idx) in layout.iter().enumerate() {
2267                gradient_buffer[buffer_idx] = term_gradient[term_idx] // This has a global layout
2268            }
2269        }
2270        let likelihood_gradients = LikelihoodGradients(gradient_buffers);
2271        Ok(self
2272            .likelihood_expression
2273            .evaluate_gradient(&likelihood_values, &likelihood_gradients))
2274    }
2275
2276    /// A function that can be called to minimize the sum/product of the [`LikelihoodTerm`]s
2277    /// contained by this [`LikelihoodEvaluator`].
2278    ///
2279    /// See [`NLL::minimize`] for more details.
2280    pub fn minimize(
2281        &self,
2282        p0: &[Float],
2283        bounds: Option<Vec<(Float, Float)>>,
2284        options: Option<MinimizerOptions>,
2285    ) -> Result<Status, LadduError> {
2286        let options = options.unwrap_or_default();
2287        let mut m = Minimizer::new(options.algorithm, self.parameters().len())
2288            .with_bounds(bounds)
2289            .with_max_steps(options.max_steps);
2290        for observer in options.observers {
2291            m = m.with_observer(observer)
2292        }
2293        #[cfg(feature = "rayon")]
2294        {
2295            m.minimize(
2296                self,
2297                p0,
2298                &mut ThreadPoolBuilder::new()
2299                    .num_threads(options.threads)
2300                    .build()
2301                    .map_err(LadduError::from)?,
2302            )?;
2303        }
2304        #[cfg(not(feature = "rayon"))]
2305        {
2306            m.minimize(self, p0, &mut ())?;
2307        }
2308        Ok(m.status)
2309    }
2310
2311    /// A function that can be called to perform Markov Chain Monte Carlo sampling
2312    /// of the sum/product of the [`LikelihoodTerm`]s
2313    /// contained by this [`LikelihoodEvaluator`].
2314    ///
2315    /// See [`NLL::mcmc`] for more details.
2316    pub fn mcmc<T: AsRef<[DVector<Float>]>>(
2317        &self,
2318        p0: T,
2319        n_steps: usize,
2320        options: Option<MCMCOptions>,
2321        rng: Rng,
2322    ) -> Result<Ensemble, LadduError> {
2323        let options = options.unwrap_or(MCMCOptions::new_ess(
2324            [ESSMove::differential(0.9), ESSMove::gaussian(0.1)],
2325            rng,
2326        ));
2327        let mut m = Sampler::new(options.algorithm, p0.as_ref().to_vec());
2328        for observer in options.observers {
2329            m = m.with_observer(observer);
2330        }
2331        let func = NegativeLikelihoodEvaluator(self);
2332        #[cfg(feature = "rayon")]
2333        {
2334            m.sample(
2335                &func,
2336                &mut ThreadPoolBuilder::new()
2337                    .num_threads(options.threads)
2338                    .build()
2339                    .map_err(LadduError::from)?,
2340                n_steps,
2341            )?;
2342        }
2343        #[cfg(not(feature = "rayon"))]
2344        {
2345            m.sample(&func, &mut (), n_steps)?;
2346        }
2347        Ok(m.ensemble)
2348    }
2349}
2350
2351/// A class which can be used to evaluate a collection of LikelihoodTerms managed by a
2352/// LikelihoodManager
2353///
2354#[cfg(feature = "python")]
2355#[pyclass(name = "LikelihoodEvaluator", module = "laddu")]
2356pub struct PyLikelihoodEvaluator(LikelihoodEvaluator);
2357
2358#[cfg(feature = "python")]
2359#[pymethods]
2360impl PyLikelihoodEvaluator {
2361    /// A list of the names of the free parameters across all terms in all models
2362    ///
2363    /// Returns
2364    /// -------
2365    /// parameters : list of str
2366    ///
2367    #[getter]
2368    fn parameters(&self) -> Vec<String> {
2369        self.0.parameters()
2370    }
2371    /// Evaluate the sum of all terms in the evaluator
2372    ///
2373    /// Parameters
2374    /// ----------
2375    /// parameters : list of float
2376    ///     The values to use for the free parameters
2377    /// threads : int, optional
2378    ///     The number of threads to use (setting this to None will use all available CPUs)
2379    ///
2380    /// Returns
2381    /// -------
2382    /// result : float
2383    ///     The total negative log-likelihood summed over all terms
2384    ///
2385    /// Raises
2386    /// ------
2387    /// Exception
2388    ///     If there was an error building the thread pool
2389    ///
2390    #[pyo3(signature = (parameters, *, threads=None))]
2391    fn evaluate(&self, parameters: Vec<Float>, threads: Option<usize>) -> PyResult<Float> {
2392        #[cfg(feature = "rayon")]
2393        {
2394            Ok(ThreadPoolBuilder::new()
2395                .num_threads(threads.unwrap_or_else(num_cpus::get))
2396                .build()
2397                .map_err(LadduError::from)?
2398                .install(|| self.0.evaluate(&parameters))?)
2399        }
2400        #[cfg(not(feature = "rayon"))]
2401        {
2402            Ok(self.0.evaluate(&parameters)?)
2403        }
2404    }
2405    /// Evaluate the gradient of the sum of all terms in the evaluator
2406    ///
2407    /// Parameters
2408    /// ----------
2409    /// parameters : list of float
2410    ///     The values to use for the free parameters
2411    /// threads : int, optional
2412    ///     The number of threads to use (setting this to None will use all available CPUs)
2413    ///
2414    /// Returns
2415    /// -------
2416    /// result : array_like
    ///     A ``numpy`` array representing the gradient of the sum of all terms in the
    ///     evaluator
2419    ///
2420    /// Raises
2421    /// ------
2422    /// Exception
2423    ///     If there was an error building the thread pool or problem creating the resulting
2424    ///     ``numpy`` array
2425    ///
2426    #[pyo3(signature = (parameters, *, threads=None))]
2427    fn evaluate_gradient<'py>(
2428        &self,
2429        py: Python<'py>,
2430        parameters: Vec<Float>,
2431        threads: Option<usize>,
2432    ) -> PyResult<Bound<'py, PyArray1<Float>>> {
2433        #[cfg(feature = "rayon")]
2434        {
2435            Ok(PyArray1::from_slice(
2436                py,
2437                ThreadPoolBuilder::new()
2438                    .num_threads(threads.unwrap_or_else(num_cpus::get))
2439                    .build()
2440                    .map_err(LadduError::from)?
2441                    .install(|| self.0.evaluate_gradient(&parameters))?
2442                    .as_slice(),
2443            ))
2444        }
2445        #[cfg(not(feature = "rayon"))]
2446        {
2447            Ok(PyArray1::from_slice(
2448                py,
2449                self.0.evaluate_gradient(&parameters)?.as_slice(),
2450            ))
2451        }
2452    }
2453
    /// Minimize all LikelihoodTerms with respect to the free parameters in their models
2455    ///
2456    /// This method "runs the fit". Given an initial position `p0` and optional `bounds`, this
    /// method performs a minimization over the total negative log-likelihood, optimizing the model
2458    /// over the stored signal data and Monte Carlo.
2459    ///
2460    /// Parameters
2461    /// ----------
2462    /// p0 : array_like
2463    ///     The initial parameters at the start of optimization
2464    /// bounds : list of tuple of float, optional
2465    ///     A list of lower and upper bound pairs for each parameter (use ``None`` for no bound)
2466    /// method : {'lbfgsb', 'nelder-mead', 'nelder_mead'}
2467    ///     The minimization algorithm to use (see additional parameters for fine-tuning)
2468    /// max_steps : int, default=4000
2469    ///     The maximum number of algorithm steps to perform
2470    /// debug : bool, default=False
2471    ///     Set to ``True`` to print out debugging information at each step
2472    /// verbose : bool, default=False
2473    ///     Set to ``True`` to print verbose information at each step
2474    /// show_step : bool, default=True
2475    ///     Include step number in verbose output
2476    /// show_x : bool, default=True
2477    ///     Include current best position in verbose output
2478    /// show_fx : bool, default=True
2479    ///     Include current best NLL in verbose output
2480    /// observers : Observer or list of Observers
2481    ///     Callback functions which are applied after every algorithm step
2482    /// tol_x_rel : float
2483    ///     The relative position tolerance used by termination methods (default is machine
2484    ///     epsilon)
2485    /// tol_x_abs : float
2486    ///     The absolute position tolerance used by termination methods (default is machine
2487    ///     epsilon)
2488    /// tol_f_rel : float
2489    ///     The relative function tolerance used by termination methods (default is machine
2490    ///     epsilon)
2491    /// tol_f_abs : float
2492    ///     The absolute function tolerance used by termination methods (default is machine
2493    ///     epsilon)
2494    /// tol_g_abs : float
2495    ///     The absolute gradient tolerance used by termination methods (default is the cube
2496    ///     root of machine epsilon)
2497    /// g_tolerance : float, default=1e-5
    ///     An additional gradient tolerance used by termination methods (particularly L-BFGS-B)
2499    /// adaptive : bool, default=False
2500    ///     Use adaptive values for Nelder-Mead parameters
2501    /// alpha : float, optional
2502    ///     Overwrite the default :math:`\alpha` parameter in the Nelder-Mead algorithm
2503    /// beta : float, optional
2504    ///     Overwrite the default :math:`\beta` parameter in the Nelder-Mead algorithm
2505    /// gamma : float, optional
2506    ///     Overwrite the default :math:`\gamma` parameter in the Nelder-Mead algorithm
2507    /// delta : float, optional
2508    ///     Overwrite the default :math:`\delta` parameter in the Nelder-Mead algorithm
2509    /// simplex_expansion_method : {'greedy_minimization', 'greedy_expansion'}
2510    ///     The expansion method used by the Nelder-Mead algorithm
    /// nelder_mead_f_terminator : {'stddev', 'absolute', 'none'}
2512    ///     The function terminator used by the Nelder-Mead algorithm
2513    /// nelder_mead_x_terminator : {'singer', 'diameter', 'rowan', 'higham', 'none'}
2514    ///     The positional terminator used by the Nelder-Mead algorithm
2515    /// threads : int, optional
2516    ///     The number of threads to use (setting this to None will use all available CPUs)
2517    ///
2518    /// Returns
2519    /// -------
2520    /// Status
2521    ///     The status of the minimization algorithm at termination
2522    ///
2523    /// Raises
2524    /// ------
2525    /// Exception
2526    ///     If there was an error building the thread pool
2527    /// ValueError
2528    ///     If any kwargs are invalid
2529    ///
2530    #[pyo3(signature = (p0, *, bounds=None, method="lbfgsb", max_steps=4000, debug=false, verbose=false, **kwargs))]
2531    #[allow(clippy::too_many_arguments)]
2532    fn minimize(
2533        &self,
2534        p0: Vec<Float>,
2535        bounds: Option<Vec<(Option<Float>, Option<Float>)>>,
2536        method: &str,
2537        max_steps: usize,
2538        debug: bool,
2539        verbose: bool,
2540        kwargs: Option<&Bound<'_, PyDict>>,
2541    ) -> PyResult<PyStatus> {
2542        let bounds = bounds.map(|bounds_vec| {
2543            bounds_vec
2544                .iter()
2545                .map(|(opt_lb, opt_ub)| {
2546                    (
2547                        opt_lb.unwrap_or(Float::NEG_INFINITY),
2548                        opt_ub.unwrap_or(Float::INFINITY),
2549                    )
2550                })
2551                .collect()
2552        });
2553        let n_parameters = p0.len();
2554        let options =
2555            py_parse_minimizer_options(n_parameters, method, max_steps, debug, verbose, kwargs)?;
2556        let status = self.0.minimize(&p0, bounds, Some(options))?;
2557        Ok(PyStatus(status))
2558    }
2559
    /// Run an MCMC algorithm on the free parameters of all terms in the evaluator
2561    ///
2562    /// This method can be used to sample the underlying log-likelihood given an initial
2563    /// position for each walker `p0`.
2564    ///
2565    /// Parameters
2566    /// ----------
2567    /// p0 : array_like
2568    ///     The initial parameters at the start of optimization
    /// n_steps : int
2570    ///     The number of MCMC steps each walker should take
2571    /// method : {'ESS', 'AIES'}
2572    ///     The MCMC algorithm to use (see additional parameters for fine-tuning)
2573    /// debug : bool, default=False
2574    ///     Set to ``True`` to print out debugging information at each step
2575    /// verbose : bool, default=False
2576    ///     Set to ``True`` to print verbose information at each step
    /// seed : int
2578    ///     The seed for the random number generator
2579    /// ess_moves : list of tuple
2580    ///     A list of moves for the ESS algorithm (see notes)
2581    /// aies_moves : list of tuple
2582    ///     A list of moves for the AIES algorithm (see notes)
2583    /// n_adaptive : int, default=100
2584    ///     Number of adaptive ESS steps to perform at the start of sampling
2585    /// mu : float, default=1.0
2586    ///     ESS adaptive parameter
2587    /// max_ess_steps : int, default=10000
2588    ///     The maximum number of slice expansions/contractions performed in the ESS algorithm
2589    /// threads : int, optional
2590    ///     The number of threads to use (setting this to None will use all available CPUs)
2591    ///
2592    /// Returns
2593    /// -------
2594    /// Ensemble
2595    ///     The resulting ensemble of walkers
2596    ///
2597    /// Raises
2598    /// ------
2599    /// Exception
2600    ///     If there was an error building the thread pool
2601    /// ValueError
2602    ///     If any kwargs are invalid
2603    ///
2604    /// Notes
2605    /// -----
2606    /// Moves may be specified as tuples of ``(move name, usage weight)`` where the move name
2607    /// depends on the algorithm and the usage weight gives the proportion of time that move is
2608    /// used relative to the others in the list.
2609    ///
2610    /// For the Ensemble Slice Sampler (ESS) algorithm, valid move types are "differential" and
2611    /// "gaussian", and the default move set is ``[("differential", 0.9), ("gaussian", 0.1)]``.
2612    ///
2613    /// For the Affine Invariant Ensemble Sampler (AIES) algorithm, valid move types are
2614    /// "stretch" and "walk", and the default move set is ``[("stretch", 0.9), ("walk", 0.1)]``.
2615    ///
2616    /// For AIES, the "stretch" move can also be given with an adaptive parameter ``a``
2617    /// (default=``2``). To add a stretch move with a different value of ``a``, the "move name"
2618    /// can be instead given as a tuple ``(move name, a)``. For example, ``(("stretch", 2.2), 0.3)``
2619    /// creates a stretch move with ``a=2.2`` and usage weight of ``0.3``.
2620    ///
2621    /// Since MCMC methods are inclined to sample maxima rather than minima, the underlying
2622    /// function sign is automatically flipped when calling this method.
2623    ///
2624    #[pyo3(signature = (p0, n_steps, *, method="ESS", debug=false, verbose=false, seed=0, **kwargs))]
2625    #[allow(clippy::too_many_arguments)]
2626    fn mcmc(
2627        &self,
2628        p0: Vec<Vec<Float>>,
2629        n_steps: usize,
2630        method: &str,
2631        debug: bool,
2632        verbose: bool,
2633        seed: u64,
2634        kwargs: Option<&Bound<'_, PyDict>>,
2635    ) -> PyResult<PyEnsemble> {
2636        let p0 = p0.into_iter().map(DVector::from_vec).collect::<Vec<_>>();
2637        let mut rng = Rng::new();
2638        rng.seed(seed);
2639        let options = py_parse_mcmc_options(method, debug, verbose, kwargs, rng.clone())?;
2640        let ensemble = self.0.mcmc(&p0, n_steps, Some(options), rng)?;
2641        Ok(PyEnsemble(ensemble))
2642    }
2643}
2644
2645/// A [`LikelihoodTerm`] which represents a single scaling parameter.
2646#[derive(Clone)]
2647pub struct LikelihoodScalar(String);
2648
2649impl LikelihoodScalar {
2650    /// Create a new [`LikelihoodScalar`] with a parameter with the given name.
2651    pub fn new<T: AsRef<str>>(name: T) -> Box<Self> {
2652        Self(name.as_ref().into()).into()
2653    }
2654}
2655
2656impl LikelihoodTerm for LikelihoodScalar {
2657    fn evaluate(&self, parameters: &[Float]) -> Float {
2658        parameters[0]
2659    }
2660
2661    fn evaluate_gradient(&self, _parameters: &[Float]) -> DVector<Float> {
2662        DVector::from_vec(vec![1.0])
2663    }
2664
2665    fn parameters(&self) -> Vec<String> {
2666        vec![self.0.clone()]
2667    }
2668}
2669
2670/// A parameterized scalar term which can be added to a LikelihoodManager
2671///
2672/// Parameters
2673/// ----------
2674/// name : str
2675///     The name of the new scalar parameter
2676///
2677/// Returns
2678/// -------
2679/// LikelihoodTerm
2680///
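/// Examples
/// --------
/// A hypothetical sketch which scales another term (``manager`` and ``term`` are assumed to
/// be an existing LikelihoodManager and LikelihoodTerm):
///
/// .. code-block:: python
///
///     scale = manager.register(LikelihoodScalar("scale"))
///     term_id = manager.register(term)
///     evaluator = manager.load(scale * term_id)
///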
2681#[cfg(feature = "python")]
2682#[pyfunction(name = "LikelihoodScalar")]
2683pub fn py_likelihood_scalar(name: String) -> PyLikelihoodTerm {
2684    PyLikelihoodTerm(LikelihoodScalar::new(name))
2685}