// gluex_lumi/lib.rs

1//! `GlueX` photon-flux and tagged-luminosity utilities.
2//!
3//! This crate builds run-dependent flux and luminosity histograms from RCDB and
4//! CCDB calibration sources.
5
6use chrono::{DateTime, TimeZone, Utc};
7use gluex_ccdb::{CCDBContext, CCDBError, CCDB};
8use gluex_rcdb::{RCDBContext, RCDBError, RCDB};
9use serde::{Deserialize, Serialize};
10use std::{
11    collections::{HashMap, HashSet},
12    env,
13    path::{Path, PathBuf},
14    str::FromStr,
15};
16use thiserror::Error;
17
18/// Command-line entry points for the `gluex-lumi` executable.
19pub mod cli;
20
/// Radiation length of beryllium in meters (X0 = 35.28 cm).
///
/// NOTE(review): the name misspells "beryllium"; it is kept as-is because the
/// constant is public API and renaming would break downstream callers.
pub const BERILLIUM_RADIATION_LENGTH_METERS: f64 = 35.28e-2;
23
24pub use gluex_core::{
25    run_periods::RunPeriod, GlueXCoreError, Histogram, RESTVersion, RESTVersionSelection, RunNumber,
26};
27
28#[derive(Error, Debug)]
29/// Errors returned by luminosity context construction and histogram generation.
30pub enum LuminosityError {
31    /// Wrapper around [`RCDBError`].
32    #[error(transparent)]
33    RCDBError(#[from] RCDBError),
34    /// Wrapper around [`CCDBError`].
35    #[error(transparent)]
36    CCDBError(#[from] CCDBError),
37    /// Failed to parse or map a converter description from RCDB.
38    #[error("unknown radiator: {0}")]
39    UnknownRadiator(String),
40    /// Endpoint calibration was required but unavailable for this run.
41    #[error("Missing endpoint calibration for run {0}")]
42    MissingEndpointCalibration(RunNumber),
43    /// Wrapper around [`GlueXCoreError`].
44    #[error(transparent)]
45    GlueXCoreError(#[from] GlueXCoreError),
46    /// No runs remained after selection and exclusions.
47    #[error("at least one run number is required")]
48    EmptyRunSelection,
49}
50
#[derive(Debug, Copy, Clone)]
/// Polarimeter converter configuration used to compute radiation-length scaling.
///
/// Parsed from the RCDB `polarimeter_converter` condition string via [`FromStr`].
pub enum Converter {
    /// No converter in beam (RCDB string `"Retracted"`).
    Retracted,
    /// Unknown converter state (RCDB string `"Unknown"`).
    Unknown,
    /// 750 um beryllium converter (RCDB string `"Be 750um"`).
    Be750um,
    /// 75 um beryllium converter (RCDB string `"Be 75um"`).
    Be75um,
    /// 50 um beryllium converter (RCDB string `"Be 50um"`).
    Be50um,
}
65impl FromStr for Converter {
66    type Err = LuminosityError;
67
68    fn from_str(s: &str) -> Result<Self, Self::Err> {
69        match s {
70            "Retracted" => Ok(Self::Retracted),
71            "Unknown" => Ok(Self::Unknown),
72            "Be 750um" => Ok(Self::Be750um),
73            "Be 75um" => Ok(Self::Be75um),
74            "Be 50um" => Ok(Self::Be50um),
75            _ => Err(LuminosityError::UnknownRadiator(s.to_string())),
76        }
77    }
78}
79impl Converter {
80    /// Converter thickness in meters.
81    #[must_use]
82    pub fn thickness(&self) -> Option<f64> {
83        match self {
84            Converter::Retracted | Converter::Unknown => None,
85            Converter::Be750um => Some(750e-6),
86            Converter::Be75um => Some(75e-6),
87            Converter::Be50um => Some(50e-6),
88        }
89    }
90    /// Converter thickness in units of radiation length.
91    #[must_use]
92    pub fn radiation_lengths(&self) -> Option<f64> {
93        self.thickness()
94            .map(|t| t / BERILLIUM_RADIATION_LENGTH_METERS)
95    }
96}
97
/// Nominal liquid-hydrogen target length in centimeters.
pub const TARGET_LENGTH_CM: f64 = 29.5;
/// Avogadro constant in mol^-1 (2019 SI exact value).
pub const AVOGADRO_CONSTANT: f64 = 6.022_140_76e23;
// First RP2019-11 run that takes the late calibration override (see get_flux_cache).
const RP2019_11_OVERRIDE_START: RunNumber = 72436;
103fn rp2019_11_override_timestamp() -> DateTime<Utc> {
104    Utc.with_ymd_and_hms(2021, 4, 23, 0, 0, 1).unwrap()
105}
106
#[derive(Debug, Clone)]
/// Selection options used when computing flux and luminosity histograms.
pub struct LuminosityContext {
    /// Sorted, deduplicated run numbers to include.
    runs: Vec<RunNumber>,
    /// Per-run-period REST version selection; periods without an entry fall
    /// back to the current version at fetch time.
    rest_version: HashMap<RunPeriod, RESTVersionSelection>,
    /// Restrict flux to the coherent-peak energy window when `true`.
    coherent_peak: bool,
    /// Require polarized (coherent-beam) runs when `true`.
    polarized: bool,
    /// Runs removed from the selection before processing.
    exclude_runs: Vec<RunNumber>,
}
116
117impl LuminosityContext {
118    /// Create a context from explicit runs and per-period REST version selection.
119    ///
120    /// # Errors
121    /// Returns [`LuminosityError::EmptyRunSelection`] if `runs` is empty.
122    pub fn new(
123        runs: Vec<RunNumber>,
124        rest_version: HashMap<RunPeriod, RESTVersionSelection>,
125    ) -> Result<Self, LuminosityError> {
126        let mut runs = runs;
127        runs.sort_unstable();
128        runs.dedup();
129        if runs.is_empty() {
130            return Err(LuminosityError::EmptyRunSelection);
131        }
132        Ok(Self {
133            runs,
134            rest_version,
135            coherent_peak: false,
136            polarized: false,
137            exclude_runs: Vec::new(),
138        })
139    }
140
141    /// Sorted unique runs to include in the calculation.
142    #[must_use]
143    pub fn runs(&self) -> &[RunNumber] {
144        &self.runs
145    }
146
147    /// Per-run-period REST version selection map.
148    #[must_use]
149    pub fn rest_version(&self) -> &HashMap<RunPeriod, RESTVersionSelection> {
150        &self.rest_version
151    }
152
153    /// Whether coherent-peak-only flux should be used.
154    #[must_use]
155    pub fn coherent_peak(&self) -> bool {
156        self.coherent_peak
157    }
158
159    /// Whether polarized beam constraints and constants should be used.
160    #[must_use]
161    pub fn polarized(&self) -> bool {
162        self.polarized
163    }
164
165    /// Runs excluded after the primary run selection.
166    #[must_use]
167    pub fn exclude_runs(&self) -> &[RunNumber] {
168        &self.exclude_runs
169    }
170
171    /// Replace the run list with a new sorted unique run set.
172    ///
173    /// # Errors
174    /// Returns [`LuminosityError::EmptyRunSelection`] if `runs` is empty.
175    pub fn with_runs(
176        mut self,
177        runs: impl IntoIterator<Item = RunNumber>,
178    ) -> Result<Self, LuminosityError> {
179        let mut run_list: Vec<RunNumber> = runs.into_iter().collect();
180        run_list.sort_unstable();
181        run_list.dedup();
182        if run_list.is_empty() {
183            return Err(LuminosityError::EmptyRunSelection);
184        }
185        self.runs = run_list;
186        Ok(self)
187    }
188
189    /// Add runs to the existing run set.
190    #[must_use]
191    pub fn add_runs(mut self, runs: impl IntoIterator<Item = RunNumber>) -> Self {
192        self.runs.extend(runs);
193        self.runs.sort_unstable();
194        self.runs.dedup();
195        self
196    }
197
198    /// Add all runs from a run period to the existing run set.
199    #[must_use]
200    pub fn with_run_period(mut self, run_period: RunPeriod) -> Self {
201        self.runs.extend(run_period.iter_runs());
202        self.runs.sort_unstable();
203        self.runs.dedup();
204        self
205    }
206
207    /// Override REST version selection for a run period.
208    #[must_use]
209    pub fn with_rest_version(
210        mut self,
211        run_period: RunPeriod,
212        selection: RESTVersionSelection,
213    ) -> Self {
214        self.rest_version.insert(run_period, selection);
215        self
216    }
217
218    /// Enable or disable coherent-peak-only flux selection.
219    #[must_use]
220    pub fn with_coherent_peak(mut self, enabled: bool) -> Self {
221        self.coherent_peak = enabled;
222        self
223    }
224
225    /// Enable or disable polarized beam selection.
226    #[must_use]
227    pub fn with_polarized(mut self, enabled: bool) -> Self {
228        self.polarized = enabled;
229        self
230    }
231
232    /// Add runs that should be excluded from processing.
233    #[must_use]
234    pub fn with_exclude_runs(mut self, runs: impl IntoIterator<Item = RunNumber>) -> Self {
235        self.exclude_runs.extend(runs);
236        self.exclude_runs.sort_unstable();
237        self.exclude_runs.dedup();
238        self
239    }
240}
241
#[derive(Debug, Clone)]
/// Entry point for tagged flux and luminosity calculations.
pub struct Luminosity {
    /// Path to the RCDB `SQLite` file.
    rcdb: PathBuf,
    /// Path to the CCDB `SQLite` file.
    ccdb: PathBuf,
}
248
249impl Default for Luminosity {
250    fn default() -> Self {
251        let rcdb = env::var("RCDB_CONNECTION")
252            .expect("RCDB_CONNECTION is not set for Luminosity::default()");
253        let ccdb = env::var("CCDB_CONNECTION")
254            .expect("CCDB_CONNECTION is not set for Luminosity::default()");
255        Self {
256            rcdb: PathBuf::from(rcdb),
257            ccdb: PathBuf::from(ccdb),
258        }
259    }
260}
261
262impl Luminosity {
263    /// Create a calculator from RCDB and CCDB `SQLite` paths.
264    pub fn new(rcdb: impl AsRef<Path>, ccdb: impl AsRef<Path>) -> Self {
265        Self {
266            rcdb: rcdb.as_ref().to_path_buf(),
267            ccdb: ccdb.as_ref().to_path_buf(),
268        }
269    }
270}
271
#[derive(Debug, Clone)]
/// Cached per-run CCDB/RCDB calibration data used to build histograms.
///
/// One entry per run; a run is only cached when every required table below has
/// data for it (the endpoint calibration alone is optional).
pub struct FluxCache {
    /// Combined livetime and converter scaling factor:
    /// livetime ratio × 9 / (7 × converter radiation lengths).
    pub livetime_scaling: f64,
    /// Pair-spectrometer acceptance parameters `(p0, p1, p2)`.
    pub pair_spectrometer_parameters: (f64, f64, f64),
    /// Photon endpoint energy in `GeV`.
    pub photon_endpoint_energy: f64,
    /// TAGM tagged flux rows `(column, flux, error)`.
    pub tagm_tagged_flux: Vec<(f64, f64, f64)>,
    /// TAGM scaled-energy ranges `(emin, emax)`, paired index-wise with
    /// `tagm_tagged_flux`.
    pub tagm_scaled_energy_range: Vec<(f64, f64)>,
    /// TAGH tagged flux rows `(counter, flux, error)`.
    pub tagh_tagged_flux: Vec<(f64, f64, f64)>,
    /// TAGH scaled-energy ranges `(emin, emax)`, paired index-wise with
    /// `tagh_tagged_flux`.
    pub tagh_scaled_energy_range: Vec<(f64, f64)>,
    /// Optional endpoint calibration correction in `GeV` (required for runs
    /// above 60000).
    pub photon_endpoint_calibration: Option<f64>,
    /// Number of target scattering centers and uncertainty `(value, error)`,
    /// in protons/barn.
    pub target_scattering_centers: (f64, f64),
}
294
/// Collect per-run RCDB/CCDB calibration inputs for one run period into
/// [`FluxCache`] entries.
///
/// RCDB supplies the converter state (restricted to approved production runs,
/// plus coherent beam when `polarized`); CCDB supplies livetime, acceptance,
/// endpoint, tagger flux/energy tables, and target density. A run appears in
/// the output only if every required table has an entry for it — runs with
/// incomplete data are silently dropped.
#[allow(clippy::too_many_lines)]
fn get_flux_cache(
    run_period: RunPeriod,
    runs: &[RunNumber],
    polarized: bool,
    timestamp: DateTime<Utc>,
    rcdb_path: &Path,
    ccdb_path: &Path,
) -> Result<HashMap<RunNumber, FluxCache>, LuminosityError> {
    if runs.is_empty() {
        return Ok(HashMap::new());
    }
    let rcdb = RCDB::open(rcdb_path)?;
    // Restrict to approved production runs; additionally require coherent beam
    // when a polarized selection was requested.
    let mut rcdb_filters = gluex_rcdb::conditions::aliases::approved_production(run_period);
    if polarized {
        rcdb_filters = gluex_rcdb::conditions::all([
            rcdb_filters,
            gluex_rcdb::conditions::aliases::is_coherent_beam(),
        ]);
    }
    let polarimeter_converter: HashMap<RunNumber, Converter> = rcdb
        .fetch(
            ["polarimeter_converter"],
            &RCDBContext::default()
                .with_runs(runs.iter().copied())
                .filter(rcdb_filters),
        )?
        .into_iter()
        .map(|(r, pc_map)| {
            let mut converter = pc_map["polarimeter_converter"]
                .as_string()
                .unwrap()
                .parse()?;
            // Runs 10634..=10693 are recorded without a usable converter in
            // RCDB, but the logbook records a 75um converter — patch it in.
            if !matches!(
                converter,
                Converter::Be75um | Converter::Be750um | Converter::Be50um,
            ) && r > 10633
                && r < 10694
            {
                converter = Converter::Be75um; // no converter in RCDB but 75um found in logbook
            }
            Ok((r, converter))
        })
        .collect::<Result<HashMap<RunNumber, Converter>, LuminosityError>>()?;
    let ccdb = CCDB::open(ccdb_path)?;
    let ccdb_context = CCDBContext::default().with_runs(runs.iter().copied());
    // REST-version-dependent tables are read at the resolved REST timestamp;
    // the remaining tables use the default CCDB context.
    let ccdb_context_restver = ccdb_context.clone().with_timestamp(timestamp);
    // Livetime ratio = live / total (rows 0 and 3 of column 1); falls back to
    // 1.0 when the total is non-positive.
    let livetime_ratio: HashMap<RunNumber, f64> = ccdb
        .fetch(
            "/PHOTON_BEAM/pair_spectrometer/lumi/trig_live",
            &ccdb_context,
        )?
        .into_iter()
        .filter_map(|(r, d)| {
            let livetime = d.column(1)?;
            let live = livetime.row(0).as_double()?;
            let total = livetime.row(3).as_double()?;
            Some((r, if total > 0.0 { live / total } else { 1.0 }))
        })
        .collect::<HashMap<_, _>>();
    // Converter-dependent scaling; runs whose converter has no thickness
    // (Retracted/Unknown outside the patched range) are dropped here.
    let livetime_scaling: HashMap<RunNumber, f64> = polarimeter_converter
        .into_iter()
        .filter_map(|(r, c)| {
            // See https://doi.org/10.1103/RevModPhys.46.815 Section IV parts B, C, and D
            Some((
                r,
                livetime_ratio.get(&r).unwrap_or(&1.0) * 9.0 / (7.0 * c.radiation_lengths()?),
            ))
        })
        .collect();
    let pair_spectrometer_parameters = fetch_pair_spectrometer_parameters(&ccdb, &ccdb_context)?;
    let mut photon_endpoint_energy = fetch_photon_endpoint_energy(&ccdb, &ccdb_context_restver)?;
    let microscope_tagged_flux = fetch_tagm_tagged_flux(&ccdb, &ccdb_context)?;
    let mut microscope_scaled_energy_range =
        fetch_tagm_scaled_energy_range(&ccdb, &ccdb_context_restver)?;
    let hodoscope_tagged_flux = fetch_tagh_tagged_flux(&ccdb, &ccdb_context)?;
    let mut hodoscope_scaled_energy_range =
        fetch_tagh_scaled_energy_range(&ccdb, &ccdb_context_restver)?;
    let mut photon_endpoint_calibration =
        fetch_photon_endpoint_calibration(&ccdb, &ccdb_context_restver)?;
    // Density is in mg/cm^3, so to get the number of scattering centers, we multiply density by
    // the target length to get mg/cm^2, then we multiply by 1e-3 to get g/cm^2. We then multiply
    // by 1e-24 cm^2/barn to get g/barn, and finally by Avogadro's constant to get g/(mol * barn).
    // Finally, we divide by 1 g/mol (proton molar mass) to get protons/barn
    let factor = 1e-24 * AVOGADRO_CONSTANT * 1e-3 * TARGET_LENGTH_CM;
    let target_scattering_centers: HashMap<RunNumber, (f64, f64)> = ccdb
        .fetch("/TARGET/density", &ccdb_context)?
        .into_iter()
        .filter_map(|(r, d)| Some((r, (d.double(0, 0)? * factor, d.double(1, 0)? * factor))))
        .collect();

    // Late RP2019-11 runs (>= RP2019_11_OVERRIDE_START) take calibration values
    // as of the 2021-04-23 override timestamp instead of the REST timestamp.
    if run_period == RunPeriod::RP2019_11 {
        let override_context = ccdb_context
            .clone()
            .with_timestamp(rp2019_11_override_timestamp());
        apply_run_override(
            &mut photon_endpoint_energy,
            fetch_photon_endpoint_energy(&ccdb, &override_context)?,
            RP2019_11_OVERRIDE_START,
            run_period.max_run(),
        );
        apply_run_override(
            &mut microscope_scaled_energy_range,
            fetch_tagm_scaled_energy_range(&ccdb, &override_context)?,
            RP2019_11_OVERRIDE_START,
            run_period.max_run(),
        );
        apply_run_override(
            &mut hodoscope_scaled_energy_range,
            fetch_tagh_scaled_energy_range(&ccdb, &override_context)?,
            RP2019_11_OVERRIDE_START,
            run_period.max_run(),
        );
        apply_run_override(
            &mut photon_endpoint_calibration,
            fetch_photon_endpoint_calibration(&ccdb, &override_context)?,
            RP2019_11_OVERRIDE_START,
            run_period.max_run(),
        );
    }
    // Join everything per run. Any missing required entry drops the run; the
    // endpoint calibration alone stays optional (`Option<f64>`).
    Ok(livetime_scaling
        .into_iter()
        .filter_map(|(r, livetime_scaling)| {
            let pair_spectrometer_parameters = *pair_spectrometer_parameters.get(&r)?;
            let photon_endpoint_energy = *photon_endpoint_energy.get(&r)?;
            let photon_endpoint_calibration = photon_endpoint_calibration.get(&r).copied();
            let target_scattering_centers = *target_scattering_centers.get(&r)?;
            Some((
                r,
                FluxCache {
                    livetime_scaling,
                    pair_spectrometer_parameters,
                    photon_endpoint_energy,
                    tagm_tagged_flux: microscope_tagged_flux.get(&r)?.clone(),
                    tagm_scaled_energy_range: microscope_scaled_energy_range.get(&r)?.clone(),
                    tagh_tagged_flux: hodoscope_tagged_flux.get(&r)?.clone(),
                    tagh_scaled_energy_range: hodoscope_scaled_energy_range.get(&r)?.clone(),
                    photon_endpoint_calibration,
                    target_scattering_centers,
                },
            ))
        })
        .collect())
}
439
/// Photon flux and luminosity histograms aggregated across TAGM and TAGH detectors.
///
/// Produced by [`Luminosity::fetch`]; all four histograms share the same
/// photon-energy bin edges.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FluxHistograms {
    /// Total photon flux summed over TAGM and TAGH detectors as a [`Histogram`].
    pub tagged_flux: Histogram,
    /// Photon flux measured by the microscope (TAGM) detector only as a [`Histogram`].
    pub tagm_flux: Histogram,
    /// Photon flux measured by the hodoscope (TAGH) detector only as a [`Histogram`].
    pub tagh_flux: Histogram,
    /// Tagged luminosity derived from the flux and scattering-center constants as a [`Histogram`].
    pub tagged_luminosity: Histogram,
}
452
/// Pair-spectrometer acceptance at photon energy `x` for parameters
/// `(p0, p1, p2)`: rises as `p0 * (1 - 2*p1/x)` on `(2*p1, p1+p2)`, falls as
/// `p0 * (2*p2/x - 1)` for `x >= p1+p2`, and is zero elsewhere.
fn pair_spectrometer_acceptance(x: f64, args: (f64, f64, f64)) -> f64 {
    let (p0, p1, p2) = args;
    let rising_edge = 2.0 * p1;
    let falling_edge = p1 + p2;
    if x >= falling_edge {
        p0 * (2.0 * p2 / x - 1.0)
    } else if x > rising_edge {
        p0 * (1.0 - 2.0 * p1 / x)
    } else {
        0.0
    }
}
463
464fn fetch_pair_spectrometer_parameters(
465    ccdb: &CCDB,
466    context: &CCDBContext,
467) -> Result<HashMap<RunNumber, (f64, f64, f64)>, CCDBError> {
468    Ok(ccdb
469        .fetch("/PHOTON_BEAM/pair_spectrometer/lumi/PS_accept", context)?
470        .into_iter()
471        .filter_map(|(r, d)| {
472            let row = d.row(0).ok()?;
473            Some((r, (row.double(0)?, row.double(1)?, row.double(2)?)))
474        })
475        .collect())
476}
477
478fn fetch_photon_endpoint_energy(
479    ccdb: &CCDB,
480    context: &CCDBContext,
481) -> Result<HashMap<RunNumber, f64>, CCDBError> {
482    Ok(ccdb
483        .fetch("/PHOTON_BEAM/endpoint_energy", context)?
484        .into_iter()
485        .filter_map(|(r, d)| Some((r, d.value(0, 0)?.as_double()?)))
486        .collect())
487}
488
489#[allow(clippy::type_complexity)]
490fn fetch_tagm_tagged_flux(
491    ccdb: &CCDB,
492    context: &CCDBContext,
493) -> Result<HashMap<RunNumber, Vec<(f64, f64, f64)>>, CCDBError> {
494    Ok(ccdb
495        .fetch("/PHOTON_BEAM/pair_spectrometer/lumi/tagm/tagged", context)?
496        .into_iter()
497        .map(|(r, d)| {
498            (
499                r,
500                d.iter_rows()
501                    .filter_map(|row| Some((row.double(0)?, row.double(1)?, row.double(2)?)))
502                    .collect::<Vec<_>>(),
503            )
504        })
505        .collect())
506}
507
508fn fetch_tagm_scaled_energy_range(
509    ccdb: &CCDB,
510    context: &CCDBContext,
511) -> Result<HashMap<RunNumber, Vec<(f64, f64)>>, CCDBError> {
512    Ok(ccdb
513        .fetch("/PHOTON_BEAM/microscope/scaled_energy_range", context)?
514        .into_iter()
515        .map(|(r, d)| {
516            (
517                r,
518                d.iter_rows()
519                    .filter_map(|row| Some((row.double(1)?, row.double(2)?)))
520                    .collect::<Vec<_>>(),
521            )
522        })
523        .collect())
524}
525
526#[allow(clippy::type_complexity)]
527fn fetch_tagh_tagged_flux(
528    ccdb: &CCDB,
529    context: &CCDBContext,
530) -> Result<HashMap<RunNumber, Vec<(f64, f64, f64)>>, CCDBError> {
531    Ok(ccdb
532        .fetch("/PHOTON_BEAM/pair_spectrometer/lumi/tagh/tagged", context)?
533        .into_iter()
534        .map(|(r, d)| {
535            (
536                r,
537                d.iter_rows()
538                    .filter_map(|row| Some((row.double(0)?, row.double(1)?, row.double(2)?)))
539                    .collect::<Vec<_>>(),
540            )
541        })
542        .collect())
543}
544
545fn fetch_tagh_scaled_energy_range(
546    ccdb: &CCDB,
547    context: &CCDBContext,
548) -> Result<HashMap<RunNumber, Vec<(f64, f64)>>, CCDBError> {
549    Ok(ccdb
550        .fetch("/PHOTON_BEAM/hodoscope/scaled_energy_range", context)?
551        .into_iter()
552        .map(|(r, d)| {
553            (
554                r,
555                d.iter_rows()
556                    .filter_map(|row| Some((row.double(1)?, row.double(2)?)))
557                    .collect::<Vec<_>>(),
558            )
559        })
560        .collect())
561}
562
563fn fetch_photon_endpoint_calibration(
564    ccdb: &CCDB,
565    context: &CCDBContext,
566) -> Result<HashMap<RunNumber, f64>, CCDBError> {
567    Ok(ccdb
568        .fetch("/PHOTON_BEAM/hodoscope/endpoint_calib", context)?
569        .into_iter()
570        .filter_map(|(r, d)| Some((r, d.double(0, 0)?)))
571        .collect())
572}
573
574fn apply_run_override<T>(
575    target: &mut HashMap<RunNumber, T>,
576    overrides: HashMap<RunNumber, T>,
577    run_min: RunNumber,
578    run_max: RunNumber,
579) {
580    for (run, value) in overrides {
581        if run >= run_min && run <= run_max {
582            target.insert(run, value);
583        }
584    }
585}
586
impl Luminosity {
    /// Construct tagged photon-flux and luminosity histograms for a run context.
    ///
    /// # Arguments
    /// * `edges` - Photon-energy bin edges used to construct output [`Histogram`]s.
    /// * `ctx` - [`LuminosityContext`] defining runs, REST versions, and selection flags.
    ///
    /// # Returns
    /// [`FluxHistograms`] for flux and tagged luminosity that satisfy the requested selections.
    ///
    /// # Errors
    /// Returns a [`LuminosityError`] if RCDB/CCDB data cannot be fetched or the run
    /// selection is invalid after filtering.
    #[allow(clippy::too_many_lines)]
    pub fn fetch(
        &self,
        edges: &[f64],
        ctx: &LuminosityContext,
    ) -> Result<FluxHistograms, LuminosityError> {
        let mut cache: HashMap<RunNumber, FluxCache> = HashMap::new();
        let coherent_peak = ctx.coherent_peak();
        // All four output histograms share the caller-supplied binning.
        let mut tagged_flux_hist = Histogram::empty(edges)?;
        let mut microscope_flux_hist = Histogram::empty(edges)?;
        let mut hodoscope_flux_hist = Histogram::empty(edges)?;
        let mut tagged_luminosity_hist = Histogram::empty(edges)?;
        // Remove excluded runs from the primary selection before processing.
        let mut run_numbers: Vec<RunNumber> = ctx.runs().to_vec();
        if !ctx.exclude_runs().is_empty() {
            let exclude_set: HashSet<RunNumber> = ctx.exclude_runs().iter().copied().collect();
            run_numbers.retain(|run| !exclude_set.contains(run));
        }
        if run_numbers.is_empty() {
            return Err(LuminosityError::EmptyRunSelection);
        }
        // Group runs by run period so each period resolves its calibration
        // context (REST timestamp) exactly once.
        let mut runs_by_period: HashMap<RunPeriod, Vec<RunNumber>> = HashMap::new();
        for run in &run_numbers {
            let period = RunPeriod::try_from(*run)?;
            runs_by_period.entry(period).or_default().push(*run);
        }
        let mut run_periods: Vec<RunPeriod> = runs_by_period.keys().copied().collect();
        run_periods.sort_unstable();
        for rp in &run_periods {
            // REST version defaults to `Current` when the period has no
            // explicit entry in the context map.
            let selection = ctx
                .rest_version()
                .get(rp)
                .copied()
                .unwrap_or(RESTVersionSelection::Current);
            let timestamp = selection.resolve_timestamp(*rp)?;
            cache.extend(get_flux_cache(
                *rp,
                runs_by_period
                    .get(rp)
                    .map_or(&[][..], |runs| runs.as_slice()),
                ctx.polarized(),
                timestamp,
                &self.rcdb,
                &self.ccdb,
            )?);
        }
        // Runs absent from the cache (dropped for incomplete data) are skipped
        // silently by the `if let` below.
        for run in run_numbers {
            if let Some(data) = cache.get(&run) {
                // Endpoint correction: runs above 60000 must have a calibration
                // entry; earlier runs fall back to zero correction.
                let delta_e = match data.photon_endpoint_calibration {
                    Some(calibration) => data.photon_endpoint_energy - calibration,
                    None if run > 60000 => {
                        return Err(LuminosityError::MissingEndpointCalibration(run));
                    }
                    None => 0.0,
                };
                // Fill microscope
                for (tagged_flux, e_range) in data
                    .tagm_tagged_flux
                    .iter()
                    .zip(data.tagm_scaled_energy_range.iter())
                {
                    // Counter energy: endpoint energy scaled by the midpoint of
                    // the counter's scaled-energy range, plus the correction.
                    let energy =
                        data.photon_endpoint_energy * (e_range.0 + e_range.1) * 0.5 + delta_e;

                    if coherent_peak {
                        let (coherent_peak_low, coherent_peak_high) =
                            gluex_core::run_periods::coherent_peak(run);
                        if energy < coherent_peak_low || energy > coherent_peak_high {
                            continue;
                        }
                    }
                    // Zero/negative acceptance would blow up the division below.
                    let acceptance =
                        pair_spectrometer_acceptance(energy, data.pair_spectrometer_parameters);
                    if acceptance <= 0.0 {
                        continue;
                    }
                    if let Some(ibin) = tagged_flux_hist.get_index(energy) {
                        let count = tagged_flux.1 * data.livetime_scaling / acceptance;
                        let error = tagged_flux.2 * data.livetime_scaling / acceptance;
                        tagged_flux_hist.counts[ibin] += count;
                        // Errors accumulate in quadrature via `hypot`.
                        tagged_flux_hist.errors[ibin] = tagged_flux_hist.errors[ibin].hypot(error);
                        microscope_flux_hist.counts[ibin] += count;
                        microscope_flux_hist.errors[ibin] =
                            microscope_flux_hist.errors[ibin].hypot(error);
                    }
                }
                // Fill hodoscope (same procedure as the microscope loop above,
                // accumulating into the TAGH histogram instead).
                for (tagged_flux, e_range) in data
                    .tagh_tagged_flux
                    .iter()
                    .zip(data.tagh_scaled_energy_range.iter())
                {
                    let energy =
                        data.photon_endpoint_energy * (e_range.0 + e_range.1) * 0.5 + delta_e;

                    if coherent_peak {
                        let (coherent_peak_low, coherent_peak_high) =
                            gluex_core::run_periods::coherent_peak(run);
                        if energy < coherent_peak_low || energy > coherent_peak_high {
                            continue;
                        }
                    }
                    let acceptance =
                        pair_spectrometer_acceptance(energy, data.pair_spectrometer_parameters);
                    if acceptance <= 0.0 {
                        continue;
                    }
                    if let Some(ibin) = tagged_flux_hist.get_index(energy) {
                        let count = tagged_flux.1 * data.livetime_scaling / acceptance;
                        let error = tagged_flux.2 * data.livetime_scaling / acceptance;
                        tagged_flux_hist.counts[ibin] += count;
                        tagged_flux_hist.errors[ibin] = tagged_flux_hist.errors[ibin].hypot(error);
                        hodoscope_flux_hist.counts[ibin] += count;
                        hodoscope_flux_hist.errors[ibin] =
                            hodoscope_flux_hist.errors[ibin].hypot(error);
                    }
                }
                // NOTE(review): the luminosity bins are overwritten on every run
                // from the flux accumulated so far, using the CURRENT run's
                // scattering-center constants — so the final histogram uses the
                // last processed run's target values. Confirm this is intended
                // when target density varies across the selected runs.
                let (n_scattering_centers, n_scattering_centers_error) =
                    data.target_scattering_centers;
                for ibin in 0..tagged_flux_hist.bins() {
                    let count = tagged_flux_hist.counts[ibin];
                    if count <= 0.0 {
                        continue;
                    }
                    let luminosity = count * n_scattering_centers / 1e12; // pb^-1
                    // Relative flux and target errors combine in quadrature.
                    let flux_error = tagged_flux_hist.errors[ibin] / count;
                    let target_error = n_scattering_centers_error / n_scattering_centers;
                    tagged_luminosity_hist.counts[ibin] = luminosity;
                    tagged_luminosity_hist.errors[ibin] =
                        luminosity * target_error.hypot(flux_error);
                }
            }
        }
        Ok(FluxHistograms {
            tagged_flux: tagged_flux_hist,
            tagm_flux: microscope_flux_hist,
            tagh_flux: hodoscope_flux_hist,
            tagged_luminosity: tagged_luminosity_hist,
        })
    }
}
739}