1use crate::Value;
77use ffi::FFI;
78use std::marker::PhantomData;
79use std::mem::transmute;
80use std::os::raw::c_void;
81use std::slice;
82
// `ffi_wrapper!` generates the `PlainMonteCarlo` type owning the raw
// `*mut sys::gsl_monte_plain_state`, presumably released with
// `gsl_monte_plain_free` when dropped — confirm in the macro definition.
// The string argument becomes the generated type's documentation.
ffi_wrapper!(PlainMonteCarlo, *mut sys::gsl_monte_plain_state, gsl_monte_plain_free,
"The plain Monte Carlo algorithm samples points randomly from the integration region to estimate
the integral and its error. Using this algorithm the estimate of the integral E(f; N) for N
randomly distributed points x_i is given by,

```text
E(f; N) = = V <f> = (V / N) sum_i^N f(x_i)
```

where V is the volume of the integration region. The error on this estimate `sigma(E;N)` is
calculated from the estimated variance of the mean,

```text
sigma^2 (E; N) = (V^2 / N^2) sum_i^N (f(x_i) - <f>)^2.
```

For large N this variance decreases asymptotically as `Var(f)/N`, where `Var(f)` is the true
variance of the function over the integration region. The error estimate itself should decrease as
`sigma(f)/sqrt{N}`. The familiar law of errors decreasing as `1/sqrt{N}` applies-to reduce the
error by a factor of 10 requires a 100-fold increase in the number of sample points.");
103
104impl PlainMonteCarlo {
105 #[doc(alias = "gsl_monte_plain_alloc")]
107 pub fn new(dim: usize) -> Option<PlainMonteCarlo> {
108 let tmp = unsafe { sys::gsl_monte_plain_alloc(dim) };
109
110 if tmp.is_null() {
111 None
112 } else {
113 Some(PlainMonteCarlo::wrap(tmp))
114 }
115 }
116
117 #[doc(alias = "gsl_monte_plain_init")]
120 pub fn init(&mut self) -> Result<(), Value> {
121 let ret = unsafe { sys::gsl_monte_plain_init(self.unwrap_unique()) };
122 result_handler!(ret, ())
123 }
124
125 #[doc(alias = "gsl_monte_plain_integrate")]
136 pub fn integrate<F: FnMut(&[f64]) -> f64>(
137 &mut self,
138 f: F,
139 xl: &[f64],
140 xu: &[f64],
141 t_calls: usize,
142 r: &mut crate::Rng,
143 ) -> Result<(f64, f64), Value> {
144 assert!(xl.len() == xu.len());
145 let mut result = 0f64;
146 let mut abserr = 0f64;
147 let f: Box<F> = Box::new(f);
148 let ret = unsafe {
149 let func = sys::gsl_monte_function {
150 f: transmute(monte_trampoline::<F> as usize),
151 dim: xl.len() as _,
152 params: Box::into_raw(f) as *mut _,
153 };
154 sys::gsl_monte_plain_integrate(
155 &func,
156 xl.as_ptr(),
157 xu.as_ptr(),
158 xl.len() as _,
159 t_calls,
160 r.unwrap_unique(),
161 self.unwrap_unique(),
162 &mut result,
163 &mut abserr,
164 )
165 };
166 result_handler!(ret, (result, abserr))
167 }
168}
169
// `ffi_wrapper!` generates the `MiserMonteCarlo` type owning the raw
// `*mut sys::gsl_monte_miser_state`, presumably released with
// `gsl_monte_miser_free` when dropped — confirm in the macro definition.
// The string argument becomes the generated type's documentation.
ffi_wrapper!(MiserMonteCarlo, *mut sys::gsl_monte_miser_state, gsl_monte_miser_free,
"The MISER algorithm of Press and Farrar is based on recursive stratified sampling. This technique
aims to reduce the overall integration error by concentrating integration points in the regions of
highest variance.

The idea of stratified sampling begins with the observation that for two disjoint regions a and b
with Monte Carlo estimates of the integral E_a(f) and E_b(f) and variances `sigma_a^2(f)` and
`sigma_b^2(f)`, the variance `Var(f)` of the combined estimate `E(f) = (1/2) (E_a(f) + E_b(f))`
is given by,

```text
Var(f) = (sigma_a^2(f) / 4 N_a) + (sigma_b^2(f) / 4 N_b).
```

It can be shown that this variance is minimized by distributing the points such that,

```text
N_a / (N_a + N_b) = sigma_a / (sigma_a + sigma_b).
```

Hence the smallest error estimate is obtained by allocating sample points in proportion to the
standard deviation of the function in each sub-region.

The MISER algorithm proceeds by bisecting the integration region along one coordinate axis to give
two sub-regions at each step. The direction is chosen by examining all d possible bisections and
selecting the one which will minimize the combined variance of the two sub-regions. The variance in
the sub-regions is estimated by sampling with a fraction of the total number of points available to
the current step. The same procedure is then repeated recursively for each of the two half-spaces
from the best bisection. The remaining sample points are allocated to the sub-regions using the
formula for N_a and N_b. This recursive allocation of integration points continues down to a
user-specified depth where each sub-region is integrated using a plain Monte Carlo estimate. These
individual values and their error estimates are then combined upwards to give an overall result and
an estimate of its error.");
203
204impl MiserMonteCarlo {
205 #[doc(alias = "gsl_monte_miser_alloc")]
208 pub fn new(dim: usize) -> Option<MiserMonteCarlo> {
209 let tmp = unsafe { sys::gsl_monte_miser_alloc(dim) };
210
211 if tmp.is_null() {
212 None
213 } else {
214 Some(MiserMonteCarlo::wrap(tmp))
215 }
216 }
217
218 #[doc(alias = "gsl_monte_miser_init")]
220 pub fn init(&mut self) -> Result<(), Value> {
221 let ret = unsafe { sys::gsl_monte_miser_init(self.unwrap_unique()) };
222 result_handler!(ret, ())
223 }
224
225 #[doc(alias = "gsl_monte_miser_integrate")]
236 pub fn integrate<F: FnMut(&[f64]) -> f64>(
237 &mut self,
238 f: F,
239 xl: &[f64],
240 xu: &[f64],
241 t_calls: usize,
242 r: &mut crate::Rng,
243 ) -> Result<(f64, f64), Value> {
244 assert!(xl.len() == xu.len());
245 let mut result = 0f64;
246 let mut abserr = 0f64;
247 let f: Box<F> = Box::new(f);
248 let ret = unsafe {
249 let mut func = sys::gsl_monte_function {
250 f: transmute(monte_trampoline::<F> as usize),
251 dim: xl.len() as _,
252 params: Box::into_raw(f) as *mut _,
253 };
254 sys::gsl_monte_miser_integrate(
255 &mut func,
256 xl.as_ptr(),
257 xu.as_ptr(),
258 xl.len() as _,
259 t_calls,
260 r.unwrap_unique(),
261 self.unwrap_unique(),
262 &mut result,
263 &mut abserr,
264 )
265 };
266 result_handler!(ret, (result, abserr))
267 }
268
269 #[doc(alias = "gsl_monte_miser_params_get")]
272 pub fn get_params(&self) -> MiserParams {
273 let mut m = sys::gsl_monte_miser_params {
274 estimate_frac: 0f64,
275 min_calls: 0,
276 min_calls_per_bisection: 0,
277 alpha: 0f64,
278 dither: 0f64,
279 };
280
281 unsafe {
282 sys::gsl_monte_miser_params_get(self.unwrap_shared(), &mut m);
283 }
284 MiserParams(m)
285 }
286
287 #[doc(alias = "gsl_monte_miser_params_set")]
290 pub fn set_params(&mut self, params: &MiserParams) {
291 unsafe {
292 sys::gsl_monte_miser_params_set(self.unwrap_unique(), ¶ms.0 as *const _);
293 }
294 }
295}
296
/// Tunable parameters for the MISER algorithm, wrapping the raw
/// `sys::gsl_monte_miser_params` struct (`estimate_frac`, `min_calls`,
/// `min_calls_per_bisection`, `alpha`, `dither`).
#[derive(Debug, Clone)]
#[repr(C)]
pub struct MiserParams(pub sys::gsl_monte_miser_params);
300
// `ffi_wrapper!` generates the `VegasMonteCarlo` type owning the raw
// `*mut sys::gsl_monte_vegas_state`, presumably released with
// `gsl_monte_vegas_free` when dropped — confirm in the macro definition.
// The string argument becomes the generated type's documentation.
ffi_wrapper!(VegasMonteCarlo, *mut sys::gsl_monte_vegas_state, gsl_monte_vegas_free,
"The VEGAS algorithm of Lepage is based on importance sampling. It samples points from the probability
distribution described by the function |f|, so that the points are concentrated in the regions that
make the largest contribution to the integral.

In general, if the Monte Carlo integral of f is sampled with points distributed according to a
probability distribution described by the function g, we obtain an estimate E_g(f; N),

```text
E_g(f; N) = E(f/g; N)
```

with a corresponding variance,

```text
Var_g(f; N) = Var(f/g; N).
```

If the probability distribution is chosen as g = |f|/I(|f|) then it can be shown that the variance
V_g(f; N) vanishes, and the error in the estimate will be zero. In practice it is not possible to
sample from the exact distribution g for an arbitrary function, so importance sampling algorithms
aim to produce efficient approximations to the desired distribution.

The VEGAS algorithm approximates the exact distribution by making a number of passes over the
integration region while histogramming the function f. Each histogram is used to define a sampling
distribution for the next pass. Asymptotically this procedure converges to the desired distribution.
In order to avoid the number of histogram bins growing like K^d the probability distribution is
approximated by a separable function: g(x_1, x_2, ...) = g_1(x_1) g_2(x_2) ... so that the number
of bins required is only Kd. This is equivalent to locating the peaks of the function from the
projections of the integrand onto the coordinate axes. The efficiency of VEGAS depends on the
validity of this assumption. It is most efficient when the peaks of the integrand are
well-localized. If an integrand can be rewritten in a form which is approximately separable this
will increase the efficiency of integration with VEGAS.

VEGAS incorporates a number of additional features, and combines both stratified sampling and
importance sampling. The integration region is divided into a number of “boxes”, with each box
getting a fixed number of points (the goal is 2). Each box can then have a fractional number of
bins, but if the ratio of bins-per-box is less than two, Vegas switches to a kind variance reduction
(rather than importance sampling).

The VEGAS algorithm computes a number of independent estimates of the integral internally, according
to the iterations parameter described below, and returns their weighted average. Random sampling of
the integrand can occasionally produce an estimate where the error is zero, particularly if the
function is constant in some regions. An estimate with zero error causes the weighted average to
break down and must be handled separately. In the original Fortran implementations of VEGAS the
error estimate is made non-zero by substituting a small value (typically 1e-30). The implementation
in GSL differs from this and avoids the use of an arbitrary constant—it either assigns the value a
weight which is the average weight of the preceding estimates or discards it according to the
following procedure,

current estimate has zero error, weighted average has finite error

* The current estimate is assigned a weight which is the average weight of the preceding estimates.
current estimate has finite error, previous estimates had zero error

* The previous estimates are discarded and the weighted averaging procedure begins with the current estimate.
current estimate has zero error, previous estimates had zero error

* The estimates are averaged using the arithmetic mean, but no error is computed.");
360
361impl VegasMonteCarlo {
362 #[doc(alias = "gsl_monte_vegas_alloc")]
365 pub fn new(dim: usize) -> Option<VegasMonteCarlo> {
366 let tmp = unsafe { sys::gsl_monte_vegas_alloc(dim) };
367
368 if tmp.is_null() {
369 None
370 } else {
371 Some(VegasMonteCarlo::wrap(tmp))
372 }
373 }
374
375 #[doc(alias = "gsl_monte_vegas_init")]
378 pub fn init(&mut self) -> Result<(), Value> {
379 let ret = unsafe { sys::gsl_monte_vegas_init(self.unwrap_unique()) };
380 result_handler!(ret, ())
381 }
382
383 #[doc(alias = "gsl_monte_vegas_integrate")]
398 pub fn integrate<F: FnMut(&[f64]) -> f64>(
399 &mut self,
400 f: F,
401 xl: &[f64],
402 xu: &[f64],
403 t_calls: usize,
404 r: &mut crate::Rng,
405 ) -> Result<(f64, f64), Value> {
406 assert!(xl.len() == xu.len());
407 let mut result = 0f64;
408 let mut abserr = 0f64;
409 let f: Box<F> = Box::new(f);
410 let ret = unsafe {
411 let mut func = sys::gsl_monte_function {
412 f: transmute(monte_trampoline::<F> as usize),
413 dim: xl.len() as _,
414 params: Box::into_raw(f) as *mut _,
415 };
416 sys::gsl_monte_vegas_integrate(
417 &mut func,
418 xl.as_ptr() as usize as *mut _,
419 xu.as_ptr() as usize as *mut _,
420 xl.len() as _,
421 t_calls,
422 r.unwrap_unique(),
423 self.unwrap_unique(),
424 &mut result,
425 &mut abserr,
426 )
427 };
428 result_handler!(ret, (result, abserr))
429 }
430
431 #[doc(alias = "gsl_monte_vegas_chisq")]
436 pub fn chisq(&mut self) -> f64 {
437 unsafe { sys::gsl_monte_vegas_chisq(self.unwrap_unique()) }
438 }
439
440 #[doc(alias = "gsl_monte_vegas_runval")]
445 pub fn runval(&mut self) -> (f64, f64) {
446 let mut result = 0.;
447 let mut sigma = 0.;
448 unsafe { sys::gsl_monte_vegas_runval(self.unwrap_unique(), &mut result, &mut sigma) };
449 (result, sigma)
450 }
451
452 #[doc(alias = "gsl_monte_vegas_params_get")]
454 pub fn get_params(&self) -> VegasParams {
455 let mut params = VegasParams::default();
456 unsafe {
457 sys::gsl_monte_vegas_params_get(self.unwrap_shared(), &mut params.inner as *mut _);
458 }
459 params
460 }
461
462 #[doc(alias = "gsl_monte_vegas_params_set")]
464 pub fn set_params(&mut self, params: &VegasParams) {
465 unsafe {
466 sys::gsl_monte_vegas_params_set(self.unwrap_unique(), ¶ms.inner as *const _);
467 }
468 }
469}
470
/// Tunable parameters for the VEGAS algorithm, wrapping the raw
/// `sys::gsl_monte_vegas_params` struct.
pub struct VegasParams<'a> {
    inner: sys::gsl_monte_vegas_params,
    // Ties the params to the lifetime of the borrowed output stream stored
    // as a raw pointer in `inner.ostream`, without owning it.
    lt: PhantomData<&'a ()>,
}
475
476impl<'a> VegasParams<'a> {
477 pub fn new(
502 alpha: f64,
503 iterations: usize,
504 stage: i32,
505 mode: ::VegasMode,
506 verbosity: VegasVerbosity,
507 stream: Option<&'a mut ::IOStream>,
508 ) -> Result<VegasParams, String> {
509 if !verbosity.is_off() && stream.is_none() {
510 return Err(
511 "rust-GSL: need to provide an input stream for Vegas Monte Carlo \
512 integration if verbosity is not 'Off'"
513 .to_string(),
514 );
515 } else if verbosity.is_off() && stream.is_some() {
516 return Err(
517 "rust-GSL: need to provide the verbosity flag for Vegas Monta Carlo \
518 integration, currently set to 'Off'"
519 .to_string(),
520 );
521 }
522
523 let stream = if let Some(stream) = stream {
524 if !stream.write_mode() {
525 return Err("rust-GSL: input stream not flagged as 'write' mode".to_string());
526 }
527 stream.as_raw()
528 } else {
529 std::ptr::null_mut()
530 };
531 Ok(VegasParams {
532 inner: sys::gsl_monte_vegas_params {
533 alpha,
534 iterations,
535 stage,
536 mode: mode.into(),
537 verbose: verbosity.to_int(),
538 ostream: stream,
539 },
540 lt: PhantomData,
541 })
542 }
543}
544
// Defaults with verbosity off and no output stream. The numeric values
// presumably mirror GSL's own defaults for a fresh vegas state
// (alpha = 1.5, iterations = 5, stage = 0, importance-only mode) —
// TODO confirm against `gsl_monte_vegas_params_get` on a new workspace.
impl<'a> std::default::Default for VegasParams<'a> {
    fn default() -> VegasParams<'a> {
        VegasParams {
            inner: sys::gsl_monte_vegas_params {
                alpha: 1.5,
                iterations: 5,
                stage: 0,
                mode: ::VegasMode::ImportanceOnly.into(),
                verbose: -1, // -1 corresponds to VegasVerbosity::Off
                ostream: std::ptr::null_mut(),
            },
            lt: PhantomData,
        }
    }
}
560
/// Amount of diagnostic output VEGAS produces when an output stream is
/// supplied. The variants map to the raw `verbose` flag via `to_int`
/// (`Off` = -1, `Summary` = 0, `Grid` = 1, `Rebinning` = 2).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VegasVerbosity {
    /// No diagnostic output.
    Off,
    /// Summary-level output.
    Summary,
    /// Additionally grid-level output.
    Grid,
    /// Additionally rebinning-level output.
    Rebinning,
}
572
573impl VegasVerbosity {
574 fn to_int(self) -> i32 {
575 match self {
576 VegasVerbosity::Off => -1,
577 VegasVerbosity::Summary => 0,
578 VegasVerbosity::Grid => 1,
579 VegasVerbosity::Rebinning => 2,
580 }
581 }
582
583 fn is_off(self) -> bool {
584 matches!(self, VegasVerbosity::Off)
585 }
586}
587
588unsafe extern "C" fn monte_trampoline<F: FnMut(&[f64]) -> f64>(
589 x: *mut f64,
590 dim: usize,
591 param: *mut c_void,
592) -> f64 {
593 let f: &mut F = &mut *(param as *mut F);
594 f(slice::from_raw_parts(x, dim))
595}
596
#[test]
fn plain() {
    use std::f64::consts::PI;

    // Integrand: 1/pi^3 * 1/(1 - cos x cos y cos z) over [0, pi]^3.
    fn g(k: &[f64]) -> f64 {
        let norm = 1f64 / (PI * PI * PI);
        norm / (1.0 - k[0].cos() * k[1].cos() * k[2].cos())
    }

    let lower: [f64; 3] = [0f64; 3];
    let upper: [f64; 3] = [PI, PI, PI];
    let calls = 500000;

    crate::RngType::env_setup();
    let mut rng = crate::Rng::new(::RngType::default()).unwrap();

    let mut mc = PlainMonteCarlo::new(3).unwrap();
    let (res, err) = mc.integrate(g, &lower, &upper, calls, &mut rng).unwrap();
    // Reference values obtained with the default GSL RNG seed.
    assert_eq!(&format!("{:.6}", res), "1.412209");
    assert_eq!(&format!("{:.6}", err), "0.013436");
}
692
#[test]
fn miser() {
    use std::f64::consts::PI;

    // Integrand: 1/pi^3 * 1/(1 - cos x cos y cos z) over [0, pi]^3.
    fn g(k: &[f64]) -> f64 {
        let norm = 1f64 / (PI * PI * PI);
        norm / (1.0 - k[0].cos() * k[1].cos() * k[2].cos())
    }

    let lower: [f64; 3] = [0f64; 3];
    let upper: [f64; 3] = [PI, PI, PI];
    let calls = 500000;

    crate::RngType::env_setup();
    let mut rng = crate::Rng::new(::RngType::default()).unwrap();

    let mut mc = MiserMonteCarlo::new(3).unwrap();
    let (res, err) = mc.integrate(g, &lower, &upper, calls, &mut rng).unwrap();
    // Reference values obtained with the default GSL RNG seed.
    assert_eq!(&format!("{:.6}", res), "1.389530");
    assert_eq!(&format!("{:.6}", err), "0.005011");
}
718
#[test]
fn miser_closure() {
    use std::f64::consts::PI;

    let lower: [f64; 3] = [0f64; 3];
    let upper: [f64; 3] = [PI, PI, PI];
    let calls = 500000;

    crate::RngType::env_setup();
    let mut rng = crate::Rng::new(::RngType::default()).unwrap();

    let mut mc = MiserMonteCarlo::new(3).unwrap();

    // Same integrand as the `miser` test, but passed as a closure to check
    // that `integrate` accepts non-`fn` callables.
    let integrand = |k: &[f64]| {
        let norm = 1f64 / (PI * PI * PI);
        norm / (1.0 - k[0].cos() * k[1].cos() * k[2].cos())
    };

    let (res, err) = mc
        .integrate(integrand, &lower, &upper, calls, &mut rng)
        .unwrap();
    assert_eq!(&format!("{:.6}", res), "1.389530");
    assert_eq!(&format!("{:.6}", err), "0.005011");
}
751
#[test]
fn vegas_warm_up() {
    use std::f64::consts::PI;

    // Integrand: 1/pi^3 * 1/(1 - cos x cos y cos z) over [0, pi]^3.
    fn g(k: &[f64]) -> f64 {
        let norm = 1f64 / (PI * PI * PI);
        norm / (1.0 - k[0].cos() * k[1].cos() * k[2].cos())
    }

    let lower: [f64; 3] = [0f64; 3];
    let upper: [f64; 3] = [PI, PI, PI];

    crate::RngType::env_setup();
    let mut rng = crate::Rng::new(::RngType::default()).unwrap();

    let mut mc = VegasMonteCarlo::new(3).unwrap();

    // A short 10000-call run; reference values are for the default RNG seed.
    let (res, err) = mc.integrate(g, &lower, &upper, 10000, &mut rng).unwrap();
    assert_eq!(&format!("{:.6}", res), "1.385603");
    assert_eq!(&format!("{:.6}", err), "0.002212");
}
775
#[test]
fn vegas() {
    use std::f64::consts::PI;

    // Integrand: 1/pi^3 * 1/(1 - cos x cos y cos z) over [0, pi]^3.
    fn g(k: &[f64]) -> f64 {
        let norm = 1. / (PI * PI * PI);
        norm / (1. - k[0].cos() * k[1].cos() * k[2].cos())
    }

    let lower: [f64; 3] = [0f64; 3];
    let upper: [f64; 3] = [PI, PI, PI];
    let calls = 500000;

    crate::RngType::env_setup();
    let mut rng = crate::Rng::new(::RngType::default()).unwrap();

    let mut mc = VegasMonteCarlo::new(3).unwrap();

    // Warm-up run to adapt the grid before collecting results.
    mc.integrate(g, &lower, &upper, 10000, &mut rng).unwrap();

    let mut res;
    let mut err;
    loop {
        let (r, e) = mc.integrate(g, &lower, &upper, calls / 5, &mut rng).unwrap();
        res = r;
        err = e;
        println!(
            "result = {:.6} sigma = {:.6} chisq/dof = {:.1}",
            res,
            err,
            mc.chisq()
        );
        // Iterate until chi-squared per dof is consistent with 1.
        if (mc.chisq() - 1f64).abs() <= 0.5f64 {
            break;
        }
    }
    assert_eq!(&format!("{:.6}", res), "1.393307");
    assert_eq!(&format!("{:.6}", err), "0.000335");
}