laddu_python/lib.rs
#![warn(clippy::perf, clippy::style)]
#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
use pyo3::prelude::*;
use pyo3::types::PyDict;

/// Returns the number of CPUs (logical cores) available for use by ``laddu``.
///
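/// Examples
/// --------
/// A minimal usage sketch (assuming the function is exposed at the top level of the
/// ``laddu`` package):
///
/// >>> import laddu
/// >>> n_cpus = laddu.available_parallelism()
///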
#[pyfunction]
pub fn available_parallelism() -> usize {
    num_cpus::get()
}

#[cfg_attr(coverage_nightly, coverage(off))]
pub mod amplitudes;
#[cfg_attr(coverage_nightly, coverage(off))]
pub mod data;
#[cfg_attr(coverage_nightly, coverage(off))]
pub mod utils;

#[cfg_attr(coverage_nightly, coverage(off))]
pub mod mpi {
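    //! Python-facing helpers for enabling, checking, and finalizing MPI support in ``laddu``.
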
    #[cfg(not(feature = "mpi"))]
    use pyo3::exceptions::PyModuleNotFoundError;

    use super::*;
    /// Check if ``laddu`` was compiled with MPI support (returns ``True`` if it was).
    ///
    /// Since ``laddu-mpi`` shares a namespace with ``laddu`` (both are imported with
    /// ``import laddu``), this method can be used to check whether MPI capabilities are
    /// available without actually running any MPI code. While functions in the ``laddu.mpi``
    /// module will raise a ``ModuleNotFoundError`` if MPI is not supported, it is sometimes
    /// more convenient to have a simple boolean check than a try-except block, and this
    /// method provides that.
    ///
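    /// Examples
    /// --------
    /// A minimal sketch of guarding MPI setup behind the availability check:
    ///
    /// >>> import laddu
    /// >>> if laddu.mpi.is_mpi_available():
    /// ...     laddu.mpi.use_mpi()
    ///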
    #[pyfunction]
    pub fn is_mpi_available() -> bool {
        #[cfg(feature = "mpi")]
        return true;
        #[cfg(not(feature = "mpi"))]
        return false;
    }
    /// Use the Message Passing Interface (MPI) to run on a distributed system
    ///
    /// Parameters
    /// ----------
    /// trigger : bool, default=True
    ///     An optional parameter which allows MPI to be used only under some boolean
    ///     condition.
    ///
    /// Notes
    /// -----
    /// You must have MPI installed for this to work, and you must call the program with
    /// ``mpirun <executable>``, or bad things will happen.
    ///
    /// MPI runs an identical program on each process, but gives each process an ID called its
    /// "rank". Only the results of methods on the root process (rank 0) should be
    /// considered valid, as other processes only contain portions of each dataset. To ensure
    /// you don't save or print data at other ranks, use the provided ``laddu.mpi.is_root()``
    /// method to check if the process is the root process.
    ///
    /// Once MPI is enabled, it cannot be disabled. If it could be, the non-root processes
    /// would keep running, but they would be independent of the root process and would no
    /// longer communicate with it. Since the root process stores no data itself, it would be
    /// difficult (and convoluted) to recover the results which were already processed via
    /// MPI.
    ///
    /// Additionally, MPI must be enabled at the beginning of a script, or at least before any
    /// other ``laddu`` functions are called. For this reason, it is suggested that you use the
    /// context manager ``laddu.mpi.MPI`` to ensure the MPI backend is used properly.
    ///
    /// If ``laddu.mpi.use_mpi()`` is called multiple times, the subsequent calls will have no
    /// effect.
    ///
    /// You **must** call ``laddu.mpi.finalize_mpi()`` before your program exits for MPI to
    /// terminate smoothly.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.MPI
    /// laddu.mpi.using_mpi
    /// laddu.mpi.is_root
    /// laddu.mpi.get_rank
    /// laddu.mpi.get_size
    /// laddu.mpi.finalize_mpi
    ///
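    /// Examples
    /// --------
    /// A minimal sketch of the intended workflow (the exact ``laddu.mpi.MPI``
    /// context-manager signature is not shown here and may differ):
    ///
    /// >>> import laddu
    /// >>> laddu.mpi.use_mpi()
    /// >>> # ... run the analysis ...
    /// >>> laddu.mpi.finalize_mpi()
    ///
    /// or, equivalently, let the context manager handle setup and teardown:
    ///
    /// >>> with laddu.mpi.MPI():
    /// ...     pass  # ... run the analysis ...
    ///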
    #[pyfunction]
    #[allow(unused_variables)]
    #[pyo3(signature = (*, trigger=true))]
    pub fn use_mpi(trigger: bool) -> PyResult<()> {
        #[cfg(feature = "mpi")]
        {
            laddu_core::mpi::use_mpi(trigger);
            Ok(())
        }
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Drop the MPI universe and finalize MPI at the end of a program
    ///
    /// This should only be called once, after all ``laddu``-related function calls have
    /// finished. It **must** be called at the end of any program which uses MPI.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    ///
    #[pyfunction]
    pub fn finalize_mpi() -> PyResult<()> {
        #[cfg(feature = "mpi")]
        {
            laddu_core::mpi::finalize_mpi();
            Ok(())
        }
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Check if MPI is enabled
    ///
    /// This can be combined with ``laddu.mpi.is_root()`` to ensure valid results are only
    /// returned from the root rank process on the condition that MPI is enabled.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    /// laddu.mpi.is_root
    ///
    #[pyfunction]
    pub fn using_mpi() -> PyResult<bool> {
        #[cfg(feature = "mpi")]
        return Ok(laddu_core::mpi::using_mpi());
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Check if the current MPI process is the root process
    ///
    /// This can be combined with ``laddu.mpi.using_mpi()`` to ensure valid results are only
    /// returned from the root rank process on the condition that MPI is enabled.
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    /// laddu.mpi.using_mpi
    ///
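    /// Examples
    /// --------
    /// A minimal sketch of restricting output to the root process (assuming an MPI-capable
    /// ``laddu-mpi`` build, since these calls raise ``ModuleNotFoundError`` otherwise):
    ///
    /// >>> import laddu
    /// >>> if not laddu.mpi.using_mpi() or laddu.mpi.is_root():
    /// ...     print("only the root process (or a non-MPI run) prints this")
    ///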
    #[pyfunction]
    pub fn is_root() -> PyResult<bool> {
        #[cfg(feature = "mpi")]
        return Ok(laddu_core::mpi::is_root());
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Get the rank of the current MPI process
    ///
    /// Returns ``None`` if MPI is not enabled
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    ///
    #[pyfunction]
    pub fn get_rank() -> PyResult<Option<i32>> {
        #[cfg(feature = "mpi")]
        return Ok(laddu_core::mpi::get_rank());
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }

    /// Get the total number of MPI processes (including the root process)
    ///
    /// Returns ``None`` if MPI is not enabled
    ///
    /// See Also
    /// --------
    /// laddu.mpi.use_mpi
    ///
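    /// Examples
    /// --------
    /// A minimal sketch of reporting the process layout (assuming an MPI-capable
    /// ``laddu-mpi`` build; both calls return ``None`` if MPI is not enabled):
    ///
    /// >>> import laddu
    /// >>> print(f"rank {laddu.mpi.get_rank()} of {laddu.mpi.get_size()}")
    ///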
    #[pyfunction]
    pub fn get_size() -> PyResult<Option<i32>> {
        #[cfg(feature = "mpi")]
        return Ok(laddu_core::mpi::get_size());
        #[cfg(not(feature = "mpi"))]
        return Err(PyModuleNotFoundError::new_err(
            "`laddu` was not compiled with MPI support! Please use `laddu-mpi` instead.",
        ));
    }
}

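/// Convenience trait for extracting an optional, typed value from a Python ``dict`` by key.
///
/// ``get_extract`` returns ``Ok(None)`` if the key is missing, ``Ok(Some(value))`` if the key
/// is present and converts to ``T``, and an error if the lookup or conversion fails.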
pub trait GetStrExtractObj {
    fn get_extract<T>(&self, key: &str) -> PyResult<Option<T>>
    where
        T: for<'py> FromPyObject<'py>;
}

#[cfg_attr(coverage_nightly, coverage(off))]
impl GetStrExtractObj for Bound<'_, PyDict> {
    fn get_extract<T>(&self, key: &str) -> PyResult<Option<T>>
    where
        T: for<'py> FromPyObject<'py>,
    {
        self.get_item(key)?
            .map(|value| value.extract::<T>())
            .transpose()
    }
}