laddu_python/lib.rs

#![warn(clippy::perf, clippy::style)]
#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
use pyo3::prelude::*;
use pyo3::types::PyDict;

#[cfg_attr(coverage_nightly, coverage(off))]
pub mod amplitudes;
#[cfg_attr(coverage_nightly, coverage(off))]
pub mod data;
#[cfg_attr(coverage_nightly, coverage(off))]
pub mod utils;

#[cfg_attr(coverage_nightly, coverage(off))]
pub mod mpi {
    #[cfg(not(feature = "mpi"))]
    use pyo3::exceptions::PyModuleNotFoundError;

    use super::*;
    /// Check if ``laddu`` was compiled with MPI support (returns ``True`` if it was).
    ///
    /// Since ``laddu-mpi`` has the same namespace as ``laddu`` (they both are imported with
    /// ``import laddu``), this method can be used to check if MPI capabilities are available
    /// without actually running any MPI code. While functions in the ``laddu.mpi`` module will
    /// raise a ``ModuleNotFoundError`` if MPI is not supported, it is sometimes convenient to have
    /// a simple boolean check rather than a ``try``/``except`` block, and this method provides that.
    ///
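    /// Examples
    /// --------
    /// A minimal sketch of the boolean check described above; which branch runs depends on
    /// whether ``laddu`` or ``laddu-mpi`` is installed:
    ///
    /// >>> import laddu
    /// >>> if laddu.mpi.is_mpi_available():
    /// ...     laddu.mpi.use_mpi()
    ///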
    #[pyfunction]
    pub fn is_mpi_available() -> bool {
        #[cfg(feature = "mpi")]
        return true;
        #[cfg(not(feature = "mpi"))]
        return false;
    }
    /// Use the Message Passing Interface (MPI) to run on a distributed system
    ///
    /// Parameters
    /// ----------
    /// trigger: bool, default=True
    ///     An optional parameter which allows MPI to only be used under some boolean
    ///     condition.
    ///
    /// Notes
    /// -----
    /// You must have MPI installed for this to work, and you must launch the program with
    /// ``mpirun <executable>``; otherwise, the program will not run correctly.
    ///
    /// MPI runs an identical copy of the program on each process, but gives each process an ID
    /// called its "rank". Only the results of methods on the root process (rank 0) should be
    /// considered valid, as other processes only contain portions of each dataset. To ensure
    /// you don't save or print data at other ranks, use the provided ``laddu.mpi.is_root()``
    /// method to check if the process is the root process.
    ///
    /// Once MPI is enabled, it cannot be disabled. If MPI could be toggled off (it cannot),
    /// the other processes would still run, but they would be independent of the root process
    /// and would no longer communicate with it. The root process stores no data itself, so it
    /// would be difficult (and convoluted) to recover the results which were already processed
    /// via MPI.
    ///
    /// Additionally, MPI must be enabled at the beginning of a script, at least before any
    /// other ``laddu`` functions are called. For this reason, it is suggested that you use the
    /// context manager ``laddu.mpi.MPI`` to ensure the MPI backend is used properly.
    ///
    /// If ``laddu.mpi.use_mpi()`` is called multiple times, the subsequent calls will have no
    /// effect.
    ///
    /// You **must** call ``laddu.mpi.finalize_mpi()`` before your program exits for MPI to terminate
    /// smoothly.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.MPI
    /// laddu.mpi.using_mpi
    /// laddu.mpi.is_root
    /// laddu.mpi.get_rank
    /// laddu.mpi.get_size
    /// laddu.mpi.finalize_mpi
    ///
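    /// Examples
    /// --------
    /// A minimal sketch of a script intended to be launched with ``mpirun``; ``run_analysis``
    /// is a hypothetical stand-in for user code:
    ///
    /// >>> import laddu
    /// >>> laddu.mpi.use_mpi()
    /// >>> result = run_analysis()
    /// >>> if laddu.mpi.is_root():
    /// ...     print(result)
    /// >>> laddu.mpi.finalize_mpi()
    ///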
    #[pyfunction]
    #[allow(unused_variables)]
    #[pyo3(signature = (*, trigger=true))]
    pub fn use_mpi(trigger: bool) -> PyResult<()> {
        #[cfg(feature = "mpi")]
        {
            laddu_core::mpi::use_mpi(trigger);
            Ok(())
        }
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Drop the MPI universe and finalize MPI at the end of a program
    ///
    /// This should only be called once, after all ``laddu``-related function calls have
    /// completed. It **must** be called at the end of any program which uses MPI.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    ///
    #[pyfunction]
    pub fn finalize_mpi() -> PyResult<()> {
        #[cfg(feature = "mpi")]
        {
            laddu_core::mpi::finalize_mpi();
            Ok(())
        }
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Check if MPI is enabled
    ///
    /// This can be combined with ``laddu.mpi.is_root()`` to ensure valid results are only
    /// returned from the root rank process on the condition that MPI is enabled.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    /// laddu.mpi.is_root
    ///
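    /// Examples
    /// --------
    /// A minimal sketch of the guard described above; ``save_results`` is a hypothetical
    /// stand-in for user code:
    ///
    /// >>> import laddu
    /// >>> if laddu.mpi.using_mpi() and laddu.mpi.is_root():
    /// ...     save_results()
    ///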
    #[pyfunction]
    pub fn using_mpi() -> PyResult<bool> {
        #[cfg(feature = "mpi")]
        return Ok(laddu_core::mpi::using_mpi());
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Check if the current MPI process is the root process
    ///
    /// This can be combined with ``laddu.mpi.using_mpi()`` to ensure valid results are only
    /// returned from the root rank process on the condition that MPI is enabled.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    /// laddu.mpi.using_mpi
    ///
    #[pyfunction]
    pub fn is_root() -> PyResult<bool> {
        #[cfg(feature = "mpi")]
        return Ok(laddu_core::mpi::is_root());
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Get the rank of the current MPI process
    ///
    /// Returns ``None`` if MPI is not enabled
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    ///
    #[pyfunction]
    pub fn get_rank() -> PyResult<Option<i32>> {
        #[cfg(feature = "mpi")]
        return Ok(laddu_core::mpi::get_rank());
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Get the total number of MPI processes (including the root process)
    ///
    /// Returns ``None`` if MPI is not enabled
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    ///
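    /// Examples
    /// --------
    /// A minimal sketch assuming ``laddu-mpi`` is installed and the script is launched with
    /// ``mpirun``:
    ///
    /// >>> import laddu
    /// >>> laddu.mpi.use_mpi()
    /// >>> print(f"rank {laddu.mpi.get_rank()} of {laddu.mpi.get_size()} processes")
    /// >>> laddu.mpi.finalize_mpi()
    ///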
    #[pyfunction]
    pub fn get_size() -> PyResult<Option<i32>> {
        #[cfg(feature = "mpi")]
        return Ok(laddu_core::mpi::get_size());
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }
}

/// Extension trait for extracting an optional, typed value from a `PyDict` by string key.
pub trait GetStrExtractObj {
    /// Look up `key` and, if present, extract the value as `T`.
    ///
    /// Returns `Ok(None)` if the key is missing and an error if extraction fails.
    fn get_extract<T>(&self, key: &str) -> PyResult<Option<T>>
    where
        T: for<'py> FromPyObject<'py>;
}

#[cfg_attr(coverage_nightly, coverage(off))]
impl GetStrExtractObj for Bound<'_, PyDict> {
    fn get_extract<T>(&self, key: &str) -> PyResult<Option<T>>
    where
        T: for<'py> FromPyObject<'py>,
    {
        self.get_item(key)?
            .map(|value| value.extract::<T>())
            .transpose()
    }
}