laddu_python/
lib.rs

#![warn(clippy::perf, clippy::style)]
#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
use pyo3::prelude::*;
use pyo3::types::PyDict;

/// Returns the number of CPUs (logical cores) available for use by ``laddu``.
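///
/// Examples
/// --------
/// A small sketch of querying the core count (the value naturally depends on the machine
/// running the code):
///
/// >>> import laddu
/// >>> n_cores = laddu.available_parallelism()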
///
#[pyfunction]
pub fn available_parallelism() -> usize {
    num_cpus::get()
}

#[cfg_attr(coverage_nightly, coverage(off))]
pub mod amplitudes;
#[cfg_attr(coverage_nightly, coverage(off))]
pub mod data;
#[cfg_attr(coverage_nightly, coverage(off))]
pub mod utils;

#[cfg_attr(coverage_nightly, coverage(off))]
pub mod mpi {
    #[cfg(not(feature = "mpi"))]
    use pyo3::exceptions::PyModuleNotFoundError;

    use super::*;
    /// Check if ``laddu`` was compiled with MPI support (returns ``True`` if it was).
    ///
    /// Since ``laddu-mpi`` has the same namespace as ``laddu`` (they are both imported with
    /// ``import laddu``), this method can be used to check if MPI capabilities are available
    /// without actually running any MPI code. While functions in the ``laddu.mpi`` module will
    /// raise a ``ModuleNotFoundError`` if MPI is not supported, it's sometimes convenient to have
    /// a simple boolean check rather than a ``try``/``except`` block, and this method provides that.
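    ///
    /// Examples
    /// --------
    /// A minimal sketch of a boolean availability check before enabling MPI (the rest of
    /// the analysis is elided):
    ///
    /// >>> import laddu
    /// >>> if laddu.mpi.is_mpi_available():
    /// ...     laddu.mpi.use_mpi()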
    ///
    #[pyfunction]
    pub fn is_mpi_available() -> bool {
        #[cfg(feature = "mpi")]
        return true;
        #[cfg(not(feature = "mpi"))]
        return false;
    }

    /// Use the Message Passing Interface (MPI) to run on a distributed system
    ///
    /// Parameters
    /// ----------
    /// trigger: bool, default=True
    ///     An optional switch; MPI will only be enabled if this is ``True``, which allows
    ///     MPI to be used conditionally.
    ///
    /// Notes
    /// -----
    /// You must have MPI installed for this to work, and you must call the program with
    /// ``mpirun <executable>``, or bad things will happen.
    ///
    /// MPI runs an identical program on each process, but gives the program an ID called its
    /// "rank". Only the results of methods on the root process (rank 0) should be
    /// considered valid, as other processes only contain portions of each dataset. To ensure
    /// you don't save or print data at other ranks, use the provided ``laddu.mpi.is_root()``
    /// method to check if the process is the root process.
    ///
    /// Once MPI is enabled, it cannot be disabled. If it could be toggled off, the other
    /// processes would still run, but they would be independent of the root process and
    /// would no longer communicate with it. The root process stores no data, so it would be
    /// difficult (and convoluted) to recover the results which were already processed via
    /// MPI.
    ///
    /// Additionally, MPI must be enabled at the beginning of a script, at least before any
    /// other ``laddu`` functions are called. For this reason, it is suggested that you use the
    /// context manager ``laddu.mpi.MPI`` to ensure the MPI backend is used properly.
    ///
    /// If ``laddu.mpi.use_mpi()`` is called multiple times, the subsequent calls will have no
    /// effect.
    ///
    /// You **must** call ``laddu.mpi.finalize_mpi()`` before your program exits for MPI to terminate
    /// smoothly.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.MPI
    /// laddu.mpi.using_mpi
    /// laddu.mpi.is_root
    /// laddu.mpi.get_rank
    /// laddu.mpi.get_size
    /// laddu.mpi.finalize_mpi
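    ///
    /// Examples
    /// --------
    /// A rough sketch of the recommended pattern (assumes the script is launched with
    /// ``mpirun`` and that ``laddu.mpi.MPI`` behaves as an ordinary context manager; the
    /// analysis body is elided):
    ///
    /// >>> import laddu
    /// >>> with laddu.mpi.MPI():
    /// ...     # every other laddu call belongs inside this block
    /// ...     if laddu.mpi.is_root():
    /// ...         print(f"running on {laddu.mpi.get_size()} processes")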
    ///
    #[pyfunction]
    #[allow(unused_variables)]
    #[pyo3(signature = (*, trigger=true))]
    pub fn use_mpi(trigger: bool) -> PyResult<()> {
        #[cfg(feature = "mpi")]
        {
            laddu_core::mpi::use_mpi(trigger);
            Ok(())
        }
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Drop the MPI universe and finalize MPI at the end of a program
    ///
    /// This should be called only once, after all ``laddu``-related function calls, and it
    /// **must** be called at the end of any program which uses MPI.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
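    ///
    /// Examples
    /// --------
    /// A minimal sketch of the manual enable/finalize pairing (assumes an ``mpirun``
    /// launch; the analysis itself is elided):
    ///
    /// >>> import laddu
    /// >>> laddu.mpi.use_mpi()
    /// >>> # ... the rest of the analysis ...
    /// >>> laddu.mpi.finalize_mpi()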
    ///
    #[pyfunction]
    pub fn finalize_mpi() -> PyResult<()> {
        #[cfg(feature = "mpi")]
        {
            laddu_core::mpi::finalize_mpi();
            Ok(())
        }
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Check if MPI is enabled
    ///
    /// This can be combined with ``laddu.mpi.is_root()`` to ensure valid results are only
    /// returned from the root process (rank 0) when MPI is enabled.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    /// laddu.mpi.is_root
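    ///
    /// Examples
    /// --------
    /// A minimal sketch of the combined check described above (``result`` is a hypothetical
    /// value computed earlier in the analysis):
    ///
    /// >>> import laddu
    /// >>> if laddu.mpi.using_mpi() and laddu.mpi.is_root():
    /// ...     print(result)  # hypothetical result, reported only by the root process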
    ///
    #[pyfunction]
    pub fn using_mpi() -> bool {
        #[cfg(feature = "mpi")]
        return laddu_core::mpi::using_mpi();
        #[cfg(not(feature = "mpi"))]
        return false;
    }

    /// Check if the current MPI process is the root process
    ///
    /// This can be combined with ``laddu.mpi.using_mpi()`` to ensure valid results are only
    /// returned from the root process (rank 0) when MPI is enabled.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    /// laddu.mpi.using_mpi
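    ///
    /// Examples
    /// --------
    /// A minimal sketch of writing output only once, from the root process (the ``results``
    /// array and the output path are hypothetical):
    ///
    /// >>> import laddu
    /// >>> import numpy as np
    /// >>> if laddu.mpi.is_root():
    /// ...     np.save("results.npy", results)  # hypothetical array and filename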
    ///
    #[pyfunction]
    pub fn is_root() -> bool {
        #[cfg(feature = "mpi")]
        return laddu_core::mpi::is_root();
        #[cfg(not(feature = "mpi"))]
        return true;
    }

    /// Get the rank of the current MPI process
    ///
    /// Returns ``0`` if MPI is not enabled
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
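    ///
    /// Examples
    /// --------
    /// A small sketch of a per-process diagnostic message:
    ///
    /// >>> import laddu
    /// >>> print(f"this is rank {laddu.mpi.get_rank()}")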
    ///
    #[pyfunction]
    pub fn get_rank() -> i32 {
        #[cfg(feature = "mpi")]
        return laddu_core::mpi::get_rank();
        #[cfg(not(feature = "mpi"))]
        return 0;
    }

    /// Get the total number of MPI processes (including the root process)
    ///
    /// Returns ``1`` if MPI is not enabled
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
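    ///
    /// Examples
    /// --------
    /// A small sketch reporting the number of participating processes, printed once from
    /// the root process:
    ///
    /// >>> import laddu
    /// >>> if laddu.mpi.is_root():
    /// ...     print(f"distributed over {laddu.mpi.get_size()} processes")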
    ///
    #[pyfunction]
    pub fn get_size() -> i32 {
        #[cfg(feature = "mpi")]
        return laddu_core::mpi::get_size();
        #[cfg(not(feature = "mpi"))]
        return 1;
    }
}

/// Helper trait for extracting an optional, typed value from a Python mapping by string key.
pub trait GetStrExtractObj {
    /// Get the value stored under ``key`` and extract it into ``T``, returning ``None`` if the
    /// key is absent.
    fn get_extract<T>(&self, key: &str) -> PyResult<Option<T>>
    where
        T: for<'a, 'py> FromPyObject<'a, 'py, Error = PyErr>;
}

#[cfg_attr(coverage_nightly, coverage(off))]
impl GetStrExtractObj for Bound<'_, PyDict> {
    fn get_extract<T>(&self, key: &str) -> PyResult<Option<T>>
    where
        T: for<'a, 'py> FromPyObject<'a, 'py, Error = PyErr>,
    {
        self.get_item(key)?
            .map(|value| value.extract::<T>())
            .transpose()
    }
}