use super::boc::{BinsWithFillLimits, Channel, Kinematics, Order, ScaleFuncForm, Scales};
use super::convolutions::{Conv, ConvolutionCache};
use super::error::{Error, Result};
use super::evolution::{self, AlphasTable, EvolveInfo, OperatorSliceInfo};
use super::fk_table::FkTable;
use super::interpolation::Interp;
use super::packed_array::PackedArray;
use super::pids::PidBasis;
use super::reference::Reference;
use super::subgrid::{
self, EmptySubgridV1, ImportSubgridV1, InterpSubgridV1, Subgrid, SubgridEnum,
};
use super::v0;
use bitflags::bitflags;
use float_cmp::approx_eq;
use git_version::git_version;
use itertools::Itertools;
use lz4_flex::frame::{FrameDecoder, FrameEncoder};
use ndarray::{s, Array2, Array3, ArrayView3, ArrayViewMut3, Axis, CowArray, Dimension, Ix4, Zip};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::io::{BufRead, BufReader, BufWriter, Read, Write};
use std::ops::{Bound, RangeBounds};
use std::{iter, mem};
const BIN_AXIS: Axis = Axis(1);
const CHANNEL_AXIS: Axis = Axis(2);
// Marker payload for the current (v4) member layout; carries no data of its own.
#[derive(Clone, Deserialize, Serialize)]
struct Mmv4;
// Versioned extension point so future `Grid` members can be added without breaking
// the serialized format.
#[derive(Clone, Deserialize, Serialize)]
enum MoreMembers {
    V4(Mmv4),
}
bitflags! {
    /// Flags selecting which optimizations [`Grid::optimize_using`] performs.
    #[derive(Clone, Copy)]
    #[repr(transparent)]
    pub struct GridOptFlags: u32 {
        /// Convert subgrids to a more compact storage type.
        const OPTIMIZE_SUBGRID_TYPE = 0b1;
        /// Run `optimize_nodes` on every subgrid.
        const OPTIMIZE_NODES = 0b10;
        /// Detect subgrids filled with a static scale.
        // NOTE(review): this flag previously shared the bit value `0b10` with
        // `OPTIMIZE_NODES`, making the two flags indistinguishable aliases; it now has
        // its own bit. It is currently not acted upon by `optimize_using`.
        const STATIC_SCALE_DETECTION = 0b100_0000;
        /// Merge channels related by transposing equal convolutions.
        const SYMMETRIZE_CHANNELS = 0b100;
        /// Remove orders whose subgrids are all empty.
        const STRIP_EMPTY_ORDERS = 0b1000;
        /// Merge channels that differ only by a constant factor.
        const MERGE_SAME_CHANNELS = 0b10000;
        /// Remove channels whose subgrids are all empty.
        const STRIP_EMPTY_CHANNELS = 0b10_0000;
    }
}
/// Central interpolation-grid type: a three-dimensional array of subgrids indexed by
/// (perturbative order, bin, channel), together with the metadata needed to interpret
/// and convolve them.
#[derive(Clone, Deserialize, Serialize)]
pub struct Grid {
    /// Subgrids laid out as `[order, bin, channel]`.
    subgrids: Array3<SubgridEnum>,
    /// Bin definitions together with the limits used when filling.
    bwfl: BinsWithFillLimits,
    /// Perturbative orders contained in this grid.
    orders: Vec<Order>,
    /// Channel definitions (PID combinations with factors).
    channels: Vec<Channel>,
    /// Particle-ID basis in which `channels` is expressed.
    pid_basis: PidBasis,
    /// One convolution specification per PID slot of each channel entry.
    convolutions: Vec<Conv>,
    /// Interpolation specification for each kinematic variable.
    interps: Vec<Interp>,
    /// Kinematic variables, one per subgrid dimension.
    kinematics: Vec<Kinematics>,
    /// Functional forms of the renormalization/factorization/fragmentation scales.
    scales: Scales,
    /// Free-form key-value metadata.
    metadata: BTreeMap<String, String>,
    /// Versioned extension point for future members.
    more_members: MoreMembers,
    /// Additional [`Reference`] data carried with the grid.
    reference: Reference,
}
impl Grid {
/// Create a new grid with all subgrids empty.
///
/// # Panics
/// Panics when a channel's number of PIDs differs from the number of convolutions,
/// when `interps` and `kinematics` have different lengths, or when `scales` is not
/// compatible with `kinematics`.
#[must_use]
pub fn new(
    bwfl: BinsWithFillLimits,
    orders: Vec<Order>,
    channels: Vec<Channel>,
    pid_basis: PidBasis,
    convolutions: Vec<Conv>,
    interps: Vec<Interp>,
    kinematics: Vec<Kinematics>,
    scales: Scales,
) -> Self {
    // Every channel entry must provide exactly one PID per convolution.
    for (channel_idx, channel) in channels.iter().enumerate() {
        let offending_entry = channel
            .entry()
            .iter()
            .find_map(|(pids, _)| (pids.len() != convolutions.len()).then_some(pids.len()));
        if let Some(pids_len) = offending_entry {
            panic!("channel #{channel_idx} has wrong number of PIDs: expected {}, found {pids_len}", convolutions.len());
        }
    }
    // Each kinematic variable needs exactly one interpolation specification.
    assert_eq!(
        interps.len(),
        kinematics.len(),
        "interps and kinematics have different lengths: {} vs. {}",
        interps.len(),
        kinematics.len(),
    );
    assert!(
        scales.compatible_with(&kinematics),
        "scales and kinematics are not compatible"
    );
    Self {
        // One empty subgrid per (order, bin, channel) combination.
        subgrids: Array3::from_shape_simple_fn(
            (orders.len(), bwfl.len(), channels.len()),
            || EmptySubgridV1.into(),
        ),
        bwfl,
        orders,
        channels,
        pid_basis,
        convolutions,
        interps,
        kinematics,
        scales,
        // Record the version of the code that created this grid.
        metadata: iter::once((
            "pineappl_gitversion".to_owned(),
            git_version!(
                args = ["--always", "--dirty", "--long", "--tags"],
                cargo_prefix = "cargo:",
                fallback = "unknown"
            )
            .to_owned(),
        ))
        .collect(),
        more_members: MoreMembers::V4(Mmv4),
        reference: Reference::default(),
    }
}
/// Return the stored [`Reference`] data.
#[must_use]
pub const fn reference(&self) -> &Reference {
    &self.reference
}
/// Overwrite the stored [`Reference`] data.
pub fn set_reference(&mut self, reference: Reference) {
    self.reference = reference;
}
/// Return the particle-ID basis the channels are expressed in.
#[must_use]
pub const fn pid_basis(&self) -> &PidBasis {
    &self.pid_basis
}
/// Return the interpolation specifications, one per kinematic variable.
#[must_use]
pub fn interpolations(&self) -> &[Interp] {
    &self.interps
}
/// Return the kinematic variables stored per subgrid dimension.
#[must_use]
pub fn kinematics(&self) -> &[Kinematics] {
    &self.kinematics
}
/// Return the functional forms of the renormalization, factorization and
/// fragmentation scales.
#[must_use]
pub const fn scales(&self) -> &Scales {
    &self.scales
}
/// Convolve this grid with the functions in `cache`, returning one value per
/// `(scale-variation, bin)` combination.
///
/// Empty `order_mask`/`channel_mask`/`bin_indices` select everything; otherwise only
/// the flagged orders/channels and the listed bins contribute. `xi` lists the
/// `(xi_r, xi_f, xi_a)` scale-variation factors to evaluate. The result is laid out as
/// `result[xi_index + xi.len() * bin_index]` and divided by the bin normalizations.
pub fn convolve(
    &self,
    cache: &mut ConvolutionCache,
    order_mask: &[bool],
    bin_indices: &[usize],
    channel_mask: &[bool],
    xi: &[(f64, f64, f64)],
) -> Vec<f64> {
    // Specialize the cache to this grid and the requested scale variations.
    let mut cache = cache.new_grid_conv_cache(self, xi);
    // An empty `bin_indices` means "all bins".
    let bin_indices = if bin_indices.is_empty() {
        (0..self.bwfl().len()).collect()
    } else {
        bin_indices.to_vec()
    };
    let mut bins = vec![0.0; bin_indices.len() * xi.len()];
    let normalizations = self.bwfl().normalizations();
    // The cache works with PDG identifiers, so translate the channels once up front.
    let pdg_channels = self.channels_pdg();
    for (xi_index, &xis @ (xir, xif, xia)) in xi.iter().enumerate() {
        for ((ord, bin, chan), subgrid) in self.subgrids.indexed_iter() {
            let order = &self.orders[ord];
            // Orders carrying explicit log(xi^2) factors vanish when the corresponding
            // xi is one (the logarithm is zero), so skip them early.
            if ((order.logxir != 0) && approx_eq!(f64, xir, 1.0, ulps = 4))
                || ((order.logxif != 0) && approx_eq!(f64, xif, 1.0, ulps = 4))
                || ((order.logxia != 0) && approx_eq!(f64, xia, 1.0, ulps = 4))
            {
                continue;
            }
            // Empty masks select everything.
            if (!order_mask.is_empty() && !order_mask[ord])
                || (!channel_mask.is_empty() && !channel_mask[chan])
            {
                continue;
            }
            // Skip bins that were not requested.
            let Some(bin_index) = bin_indices.iter().position(|&index| index == bin) else {
                continue;
            };
            if subgrid.is_empty() {
                continue;
            }
            let channel = &pdg_channels[chan];
            let mut value = 0.0;
            cache.set_grids(self, subgrid, xis);
            // Sum luminosity times weight over every filled subgrid node.
            for (idx, v) in subgrid.indexed_iter() {
                let mut lumi = 0.0;
                for entry in channel.entry() {
                    let fx_prod = cache.as_fx_prod(&entry.0, order.alphas, &idx);
                    lumi += fx_prod * entry.1;
                }
                value += lumi * v;
            }
            // Multiply in the log(xi^2) factors for scale-varied results.
            if order.logxir != 0 {
                value *= (xir * xir).ln().powi(order.logxir.into());
            }
            if order.logxif != 0 {
                value *= (xif * xif).ln().powi(order.logxif.into());
            }
            if order.logxia != 0 {
                value *= (xia * xia).ln().powi(order.logxia.into());
            }
            bins[xi_index + xi.len() * bin_index] += value / normalizations[bin];
        }
    }
    bins
}
/// Add `weight` for the kinematics in `ntuple` to the subgrid selected by `order`,
/// `channel` and the bin containing `observable`. Observables outside all bins are
/// silently discarded.
pub fn fill(
    &mut self,
    order: usize,
    observable: f64,
    channel: usize,
    ntuple: &[f64],
    weight: f64,
) {
    // Only fill when the observable falls into one of the grid's bins.
    let Some(bin) = self.bwfl().fill_index(observable) else {
        return;
    };
    let subgrid = &mut self.subgrids[[order, bin, channel]];
    // Lazily replace the empty placeholder with an interpolation subgrid.
    if matches!(subgrid, SubgridEnum::EmptySubgridV1(_)) {
        *subgrid = InterpSubgridV1::new(&self.interps).into();
    }
    subgrid.fill(&self.interps, ntuple, weight);
}
pub fn read(reader: impl Read) -> Result<Self> {
let mut reader = BufReader::new(reader);
let buffer = reader.fill_buf().map_err(|err| Error::Other(err.into()))?;
let magic_bytes: [u8; 4] = buffer[0..4].try_into().unwrap_or_else(|_| unreachable!());
if u32::from_le_bytes(magic_bytes) == 0x18_4D_22_04 {
Self::read_uncompressed(FrameDecoder::new(reader))
} else {
Self::read_uncompressed(reader)
}
}
/// Deserialize an uncompressed grid from `reader`.
///
/// If the stream starts with the 16-byte header `b"PineAPPL"` followed by a
/// little-endian version number, the header is consumed and the version selects the
/// parser; otherwise the stream is treated as a headerless version-0 file.
///
/// # Errors
/// Fails on I/O or deserialization errors and on unsupported file versions.
fn read_uncompressed(mut reader: impl BufRead) -> Result<Self> {
    // NOTE(review): buffering fewer than 16 bytes panics on the slice below — this
    // assumes all valid inputs are at least 16 bytes long; confirm for tiny v0 files.
    let magic_bytes: [u8; 16] = reader.fill_buf().map_err(|err| Error::Other(err.into()))?
        [0..16]
        .try_into()
        .unwrap_or_else(|_| unreachable!());
    let file_version = if &magic_bytes[0..8] == b"PineAPPL" {
        // Header recognized: consume it and decode the version number.
        reader.consume(16);
        u64::from_le_bytes(
            magic_bytes[8..16]
                .try_into()
                .unwrap_or_else(|_| unreachable!()),
        )
    } else {
        // No header: legacy version-0 file; nothing is consumed.
        0
    };
    match file_version {
        0 => v0::read_uncompressed_v0(reader),
        1 => bincode::deserialize_from(reader).map_err(|err| Error::Other(err.into())),
        _ => Err(Error::General(format!(
            "file version {file_version} is not supported"
        ))),
    }
}
/// Serialize this grid, uncompressed, into `writer`.
///
/// # Errors
/// Fails when writing the header or the serialized grid fails.
pub fn write(&self, writer: impl Write) -> Result<()> {
    let mut writer = BufWriter::new(writer);
    // File header: magic tag plus little-endian format version 1.
    let file_header = b"PineAPPL\x01\0\0\0\0\0\0\0";
    // Use `write_all` instead of `write`: the latter may perform a short write and
    // silently drop part of the header.
    writer
        .write_all(file_header)
        .map_err(|err| Error::Other(err.into()))?;
    bincode::serialize_into(writer, self).map_err(|err| Error::Other(err.into()))
}
pub fn write_lz4(&self, writer: impl Write) -> Result<()> {
let mut encoder = FrameEncoder::new(writer);
self.write(&mut encoder)?;
encoder
.try_finish()
.map_err(|err| Error::Other(err.into()))?;
Ok(())
}
/// Return the channel definitions of this grid.
#[must_use]
pub fn channels(&self) -> &[Channel] {
    &self.channels
}
/// Return a copy of all channels translated into the PDG particle-ID basis.
fn channels_pdg(&self) -> Vec<Channel> {
    self.channels()
        .iter()
        .cloned()
        .map(|channel| self.pid_basis().translate(PidBasis::Pdg, channel))
        .collect()
}
/// Merge the bins selected by `range` into a single bin, summing their subgrids.
///
/// # Errors
/// Returns an error when the bins cannot be merged (propagated from
/// `BinsWithFillLimits::merge`).
pub fn merge_bins(&mut self, range: impl RangeBounds<usize>) -> Result<()> {
    // Normalize the generic bounds into a half-open `[start, end)` index range.
    let range_start = match range.start_bound().cloned() {
        Bound::Included(start) => start,
        Bound::Excluded(start) => start + 1,
        Bound::Unbounded => 0,
    };
    let range_end = match range.end_bound().cloned() {
        Bound::Included(end) => end + 1,
        Bound::Excluded(end) => end,
        Bound::Unbounded => self.bwfl().len(),
    };
    // Propagate failures instead of panicking: merging can legitimately fail, e.g.
    // for invalid ranges, and this function already returns a `Result`.
    self.bwfl = self.bwfl().merge(range_start..range_end)?;
    // Split the subgrid array into [left | bins-to-merge | right] along the bin axis.
    let (intermediate, right) = self.subgrids.view().split_at(BIN_AXIS, range_end);
    let (left, merge) = intermediate.split_at(BIN_AXIS, range_start);
    let mut merged: Array2<SubgridEnum> = Array2::from_elem(
        (self.orders().len(), self.channels().len()),
        EmptySubgridV1.into(),
    );
    // Accumulate all bins of the range into a single (order, channel) plane.
    for subview in merge.axis_iter(BIN_AXIS) {
        Zip::from(&mut merged)
            .and(subview)
            .for_each(|lhs, rhs| lhs.merge(rhs, None));
    }
    let merged = merged.insert_axis(BIN_AXIS);
    self.subgrids = ndarray::concatenate(BIN_AXIS, &[left, merged.view(), right])
        .unwrap_or_else(|_| unreachable!());
    Ok(())
}
/// Merge `other` into `self`: non-empty subgrids of `other` are added into the matching
/// (order, bin, channel) slot of `self`, growing `self` with any orders, bins or
/// channels it does not yet contain.
///
/// # Errors
/// Returns an error when the two grids' convolutions, PID bases, kinematics,
/// interpolations or scales differ, or when the extended bin limits are rejected.
pub fn merge(&mut self, mut other: Self) -> Result<()> {
    // Both grids must agree on everything that defines the interpolation space.
    if self.convolutions() != other.convolutions() {
        return Err(Error::General("convolutions do not match".to_owned()));
    }
    if self.pid_basis() != other.pid_basis() {
        return Err(Error::General("PID bases do not match".to_owned()));
    }
    if self.kinematics() != other.kinematics() {
        return Err(Error::General("kinematics do not match".to_owned()));
    }
    if self.interpolations() != other.interpolations() {
        return Err(Error::General("interpolations do not match".to_owned()));
    }
    if self.scales() != other.scales() {
        return Err(Error::General("scales do not match".to_owned()));
    }
    // First pass: collect orders/bins/channels of `other` missing from `self`. Orders
    // and channels are only added when backed by non-empty subgrids; bins are compared
    // with a small ULP tolerance and added unconditionally.
    let mut new_orders = Vec::new();
    let mut new_bins = Vec::new();
    let mut new_entries = Vec::new();
    for ((i, j, k), subgrid) in other.subgrids.indexed_iter_mut() {
        let other_order = &other.orders[i];
        let other_bin = &other.bwfl.bins()[j];
        let other_entry = &other.channels[k];
        if !subgrid.is_empty()
            && !self
                .orders
                .iter()
                .chain(new_orders.iter())
                .any(|x| x == other_order)
        {
            new_orders.push(other_order.clone());
        }
        if !self
            .bwfl
            .bins()
            .iter()
            .chain(new_bins.iter())
            .any(|b| b.partial_eq_with_ulps(other_bin, 8))
        {
            new_bins.push(other_bin.clone());
        }
        if !subgrid.is_empty()
            && !self
                .channels()
                .iter()
                .chain(new_entries.iter())
                .any(|y| y == other_entry)
        {
            new_entries.push(other_entry.clone());
        }
    }
    // Grow the subgrid array; existing subgrids keep their leading indices because
    // new orders/bins/channels are appended at the end.
    if !new_orders.is_empty() || !new_entries.is_empty() || !new_bins.is_empty() {
        let old_dim = self.subgrids.raw_dim().into_pattern();
        let mut new_subgrids = Array3::from_shape_simple_fn(
            (
                old_dim.0 + new_orders.len(),
                old_dim.1 + new_bins.len(),
                old_dim.2 + new_entries.len(),
            ),
            || EmptySubgridV1.into(),
        );
        for (index, subgrid) in self.subgrids.indexed_iter_mut() {
            mem::swap(&mut new_subgrids[<[usize; 3]>::from(index)], subgrid);
        }
        self.subgrids = new_subgrids;
    }
    let total_bins = u32::try_from(self.bwfl.bins().len() + new_bins.len())
        .unwrap_or_else(|_| unreachable!());
    // Append the new bins, regenerating consecutive integer fill limits.
    if !new_bins.is_empty() {
        self.bwfl = BinsWithFillLimits::new(
            self.bwfl.bins().iter().chain(&new_bins).cloned().collect(),
            (0..=total_bins).map(f64::from).collect(),
        )?;
    }
    self.orders.append(&mut new_orders);
    self.channels.append(&mut new_entries);
    // Second pass: move (or, for occupied slots, merge) every non-empty subgrid of
    // `other` into its position in the grown array.
    for ((i, j, k), subgrid) in other
        .subgrids
        .indexed_iter_mut()
        .filter(|((_, _, _), subgrid)| !subgrid.is_empty())
    {
        let other_order = &other.orders[i];
        let other_bin = &other.bwfl.bins()[j];
        let other_entry = &other.channels[k];
        // Lookups cannot fail: everything missing was appended above.
        let self_i = self
            .orders
            .iter()
            .position(|x| x == other_order)
            .unwrap_or_else(|| unreachable!());
        let self_j = self
            .bwfl()
            .bins()
            .iter()
            .position(|b| b.partial_eq_with_ulps(other_bin, 8))
            .unwrap_or_else(|| unreachable!());
        let self_k = self
            .channels
            .iter()
            .position(|y| y == other_entry)
            .unwrap_or_else(|| unreachable!());
        if self.subgrids[[self_i, self_j, self_k]].is_empty() {
            // Cheap move into an empty slot instead of a merge.
            mem::swap(&mut self.subgrids[[self_i, self_j, self_k]], subgrid);
        } else {
            self.subgrids[[self_i, self_j, self_k]].merge(subgrid, None);
        }
    }
    Ok(())
}
/// Return the convolution specifications of this grid.
#[must_use]
pub fn convolutions(&self) -> &[Conv] {
    &self.convolutions
}
/// Return a mutable view of the convolution specifications.
pub fn convolutions_mut(&mut self) -> &mut [Conv] {
    &mut self.convolutions
}
/// Charge-conjugate the PIDs in slot `convolution` of every channel entry, folding the
/// conjugation sign into the entry's factor, and replace the corresponding convolution
/// by its charge conjugate.
pub fn charge_conjugate(&mut self, convolution: usize) {
    let pid_basis = *self.pid_basis();
    for channel in self.channels_mut() {
        let mut entries = Vec::with_capacity(channel.entry().len());
        for (mut pids, factor) in channel.entry().iter().cloned() {
            // `charge_conjugate` yields the conjugated PID and a sign factor.
            let (cc_pid, sign) = pid_basis.charge_conjugate(pids[convolution]);
            pids[convolution] = cc_pid;
            entries.push((pids, factor * sign));
        }
        *channel = Channel::new(entries);
    }
    self.convolutions_mut()[convolution] = self.convolutions()[convolution].cc();
}
/// Multiply every subgrid of this grid by `factor`.
pub fn scale(&mut self, factor: f64) {
    for subgrid in &mut self.subgrids {
        subgrid.scale(factor);
    }
}
/// Replace subgrids that claim to be non-empty but contain no filled entries with the
/// explicit empty subgrid type. Returns `true` when at least one subgrid was replaced.
pub fn repair(&mut self) -> bool {
    self.subgrids.iter_mut().fold(false, |repaired, subgrid| {
        // A subgrid is broken when it is not the empty type yet yields no entries.
        if subgrid.is_empty() || subgrid.indexed_iter().count() != 0 {
            repaired
        } else {
            *subgrid = EmptySubgridV1.into();
            true
        }
    })
}
/// Multiply each subgrid by `global` times the given couplings and logarithms raised to
/// the exponents of the subgrid's perturbative order.
pub fn scale_by_order(
    &mut self,
    alphas: f64,
    alpha: f64,
    logxir: f64,
    logxif: f64,
    logxia: f64,
    global: f64,
) {
    for ((i, _, _), subgrid) in self.subgrids.indexed_iter_mut() {
        let order = &self.orders[i];
        // factor = global * alphas^as * alpha^a * logxir^lr * logxif^lf * logxia^la
        let factor = global
            * alphas.powi(order.alphas.into())
            * alpha.powi(order.alpha.into())
            * logxir.powi(order.logxir.into())
            * logxif.powi(order.logxif.into())
            * logxia.powi(order.logxia.into());
        subgrid.scale(factor);
    }
}
pub fn scale_by_bin(&mut self, factors: &[f64]) {
for ((_, bin, _), subgrid) in self.subgrids.indexed_iter_mut() {
if let Some(&factor) = factors.get(bin) {
subgrid.scale(factor);
}
}
}
/// Return the perturbative orders of this grid.
#[must_use]
pub fn orders(&self) -> &[Order] {
    &self.orders
}
/// Return a mutable view of the perturbative orders.
#[must_use]
pub fn orders_mut(&mut self) -> &mut [Order] {
    &mut self.orders
}
/// Return a mutable view of the channels.
pub fn channels_mut(&mut self) -> &mut [Channel] {
    &mut self.channels
}
/// Return a read-only view of the subgrid array, indexed as `[order, bin, channel]`.
#[must_use]
pub fn subgrids(&self) -> ArrayView3<'_, SubgridEnum> {
    self.subgrids.view()
}
/// Return a mutable view of the subgrid array.
#[must_use]
pub fn subgrids_mut(&mut self) -> ArrayViewMut3<'_, SubgridEnum> {
    self.subgrids.view_mut()
}
/// Replace the bin configuration of this grid.
///
/// # Errors
/// Returns an error when `bwfl` does not have exactly as many bins as the grid.
pub fn set_bwfl(&mut self, bwfl: BinsWithFillLimits) -> Result<()> {
    let bins = bwfl.len();
    let grid_bins = self.bwfl().len();
    if bins != grid_bins {
        // Spell out what the counts refer to; the previous message omitted the word
        // "bins" entirely ("{N} are given, ...").
        return Err(Error::General(format!(
            "{bins} bins given, but the grid has {grid_bins} bins"
        )));
    }
    self.bwfl = bwfl;
    Ok(())
}
/// Return the bin configuration together with its fill limits.
#[must_use]
pub const fn bwfl(&self) -> &BinsWithFillLimits {
    &self.bwfl
}
/// Apply every available optimization to this grid.
pub fn optimize(&mut self) {
    self.optimize_using(GridOptFlags::all());
}
/// Apply the optimizations selected by `flags`, in a fixed order: node optimization,
/// subgrid-type optimization, channel symmetrization, empty-order stripping, merging of
/// proportional channels and finally empty-channel stripping.
///
/// Note: `STATIC_SCALE_DETECTION` is currently not acted upon here.
pub fn optimize_using(&mut self, flags: GridOptFlags) {
    if flags.contains(GridOptFlags::OPTIMIZE_NODES) {
        self.optimize_nodes();
    }
    if flags.contains(GridOptFlags::OPTIMIZE_SUBGRID_TYPE) {
        self.optimize_subgrid_type();
    }
    if flags.contains(GridOptFlags::SYMMETRIZE_CHANNELS) {
        self.symmetrize_channels();
    }
    if flags.contains(GridOptFlags::STRIP_EMPTY_ORDERS) {
        self.strip_empty_orders();
    }
    if flags.contains(GridOptFlags::MERGE_SAME_CHANNELS) {
        self.merge_same_channels();
    }
    if flags.contains(GridOptFlags::STRIP_EMPTY_CHANNELS) {
        self.strip_empty_channels();
    }
}
/// Run node optimization on every subgrid.
fn optimize_nodes(&mut self) {
    self.subgrids
        .iter_mut()
        .for_each(|subgrid| subgrid.optimize_nodes());
}
/// Convert every subgrid to its storage-optimized representation: empty subgrids
/// become `EmptySubgridV1`, all others `ImportSubgridV1`.
fn optimize_subgrid_type(&mut self) {
    for subgrid in &mut self.subgrids {
        *subgrid = if subgrid.is_empty() {
            EmptySubgridV1.into()
        } else {
            ImportSubgridV1::from(&*subgrid).into()
        };
    }
}
/// Merge channels whose subgrids are numerically identical within `ulps` units in the
/// last place, combining their channel definitions into a single channel.
pub fn dedup_channels(&mut self, ulps: i64) {
    let mut indices: Vec<usize> = (0..self.channels.len()).collect();
    // Pop from the back so removals never shift the indices still to be visited.
    while let Some(index) = indices.pop() {
        // Look for an earlier channel whose subgrids match `index`'s everywhere.
        if let Some(other_index) = indices.iter().copied().find(|&other_index| {
            let (mut a, mut b) = self
                .subgrids
                .multi_slice_mut((s![.., .., other_index], s![.., .., index]));
            // Compare all (order, bin) subgrid pairs entry by entry.
            for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) {
                let mut it_a = lhs.indexed_iter();
                let mut it_b = rhs.indexed_iter();
                loop {
                    let a = it_a.next();
                    let b = it_b.next();
                    match (a, b) {
                        (Some((tuple_a, value_a)), Some((tuple_b, value_b))) => {
                            // Entries must be at identical node indices ...
                            if tuple_a != tuple_b {
                                return false;
                            }
                            // ... with values equal up to `ulps`. Bind to a local
                            // identifier for the macro.
                            let u = ulps;
                            if !approx_eq!(f64, value_a, value_b, ulps = u) {
                                return false;
                            }
                        }
                        // Both iterators exhausted simultaneously: pair matches.
                        (None, None) => break,
                        // Different numbers of filled entries: no match.
                        _ => return false,
                    }
                }
            }
            true
        }) {
            // Fold the duplicate channel's definition into the surviving one and
            // drop its subgrids.
            let old_channel = self.channels.remove(index).entry().to_vec();
            let mut new_channel = self.channels[other_index].entry().to_vec();
            new_channel.extend(old_channel);
            self.channels[other_index] = Channel::new(new_channel);
            self.subgrids.remove_index(Axis(2), index);
        }
    }
}
/// Merge channels that differ only by a constant factor: the duplicate's subgrids are
/// rescaled by the inverse factor and moved into the surviving channel, leaving the
/// duplicate's subgrids empty.
fn merge_same_channels(&mut self) {
    // Reverse-collect + pop visits the channels in ascending index order.
    let mut indices: Vec<_> = (0..self.channels.len()).rev().collect();
    while let Some(index) = indices.pop() {
        // Find a later channel proportional to this one.
        if let Some((other_index, factor)) = indices.iter().find_map(|&i| {
            self.channels[i]
                .common_factor(&self.channels[index])
                .map(|factor| (i, factor))
        }) {
            let (mut a, mut b) = self
                .subgrids
                .multi_slice_mut((s![.., .., other_index], s![.., .., index]));
            for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) {
                if !rhs.is_empty() {
                    // Undo the proportionality factor before merging.
                    rhs.scale(1.0 / factor);
                    if lhs.is_empty() {
                        // Cheap move into the empty slot instead of a merge.
                        *lhs = mem::replace(rhs, EmptySubgridV1.into());
                    } else {
                        lhs.merge(rhs, None);
                        *rhs = EmptySubgridV1.into();
                    }
                }
            }
        }
    }
}
fn strip_empty_channels(&mut self) {
let mut indices: Vec<_> = (0..self.channels().len()).collect();
while let Some(index) = indices.pop() {
if self
.subgrids
.slice(s![.., .., index])
.iter()
.all(Subgrid::is_empty)
{
self.channels.remove(index);
self.subgrids.remove_index(Axis(2), index);
}
}
}
/// Remove every perturbative order whose subgrids are all empty.
fn strip_empty_orders(&mut self) {
    // Iterate from the back so removals do not disturb indices yet to be visited.
    for index in (0..self.orders().len()).rev() {
        let all_empty = self
            .subgrids
            .slice(s![index, .., ..])
            .iter()
            .all(Subgrid::is_empty);
        if all_empty {
            self.orders.remove(index);
            self.subgrids.remove_index(Axis(0), index);
        }
    }
}
/// When the grid contains exactly one pair of equal convolutions, exploit the symmetry:
/// self-transposed channels have their subgrids symmetrized, and channels related by
/// transposition get merged into one (the partner's subgrids are emptied and can be
/// stripped afterwards).
///
/// # Panics
/// Panics when more than one pair of equal convolutions is found.
fn symmetrize_channels(&mut self) {
    // Find all index pairs whose convolutions are identical.
    let pairs: Vec<_> = self
        .convolutions()
        .iter()
        .enumerate()
        .tuple_combinations()
        .filter(|((_, conv_a), (_, conv_b))| conv_a == conv_b)
        .map(|((idx_a, _), (idx_b, _))| (idx_a, idx_b))
        .collect();
    let (idx_a, idx_b) = match *pairs.as_slice() {
        // No equal convolutions: nothing to symmetrize.
        [] => return,
        [pair] => pair,
        _ => panic!("more than two equal convolutions found"),
    };
    // Positions of the two momentum-fraction variables among the subgrid dimensions.
    let a_subgrid = self
        .kinematics()
        .iter()
        .position(|&kin| kin == Kinematics::X(idx_a))
        .unwrap();
    let b_subgrid = self
        .kinematics()
        .iter()
        .position(|&kin| kin == Kinematics::X(idx_b))
        .unwrap();
    let mut indices: Vec<usize> = (0..self.channels.len()).rev().collect();
    while let Some(index) = indices.pop() {
        let channel_entry = &self.channels[index];
        if *channel_entry == channel_entry.transpose(idx_a, idx_b) {
            // Channel maps onto itself under transposition: symmetrize its subgrids,
            // but only when both X dimensions share the same node values.
            self.subgrids
                .slice_mut(s![.., .., index])
                .iter_mut()
                .for_each(|subgrid| {
                    if !subgrid.is_empty()
                        && (subgrid.node_values()[a_subgrid]
                            == subgrid.node_values()[b_subgrid])
                    {
                        subgrid.symmetrize(a_subgrid, b_subgrid);
                    }
                });
        } else if let Some((j, &other_index)) = indices
            .iter()
            .enumerate()
            .find(|(_, i)| self.channels[**i] == channel_entry.transpose(idx_a, idx_b))
        {
            // Found the transposed partner: merge its subgrids into this channel with
            // the two X dimensions swapped, then drop it from further consideration.
            indices.remove(j);
            let (mut a, mut b) = self
                .subgrids
                .multi_slice_mut((s![.., .., index], s![.., .., other_index]));
            for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) {
                lhs.merge(rhs, Some((a_subgrid, b_subgrid)));
                *rhs = EmptySubgridV1.into();
            }
        }
    }
}
/// Upgrade this grid's representation to the most recent one; currently a no-op.
pub fn upgrade(&mut self) {}
/// Return the key-value metadata of this grid.
#[must_use]
pub const fn metadata(&self) -> &BTreeMap<String, String> {
    &self.metadata
}
/// Return a mutable view of the key-value metadata.
#[must_use]
pub fn metadata_mut(&mut self) -> &mut BTreeMap<String, String> {
    &mut self.metadata
}
/// Collect the information needed to evolve this grid: the renormalization,
/// factorization and fragmentation scale nodes, the momentum-fraction nodes and the
/// PIDs appearing in the channels. Only non-empty subgrids of the orders selected by
/// `order_mask` are considered (an empty mask selects all orders).
#[must_use]
pub fn evolve_info(&self, order_mask: &[bool]) -> EvolveInfo {
    let mut ren1 = Vec::new();
    let mut fac1 = Vec::new();
    let mut frg1 = Vec::new();
    let mut x1 = Vec::new();
    let mut pids1 = Vec::new();
    for (channel, subgrid) in self
        .subgrids()
        .indexed_iter()
        .filter_map(|(tuple, subgrid)| {
            (!subgrid.is_empty() && (order_mask.is_empty() || order_mask[tuple.0]))
                .then_some((&self.channels()[tuple.2], subgrid))
        })
    {
        // Each list is kept sorted and deduplicated (up to node-value equality) after
        // every extension.
        ren1.extend(
            self.scales()
                .ren
                .calc(&subgrid.node_values(), self.kinematics())
                .iter(),
        );
        ren1.sort_by(f64::total_cmp);
        ren1.dedup_by(subgrid::node_value_eq_ref_mut);
        fac1.extend(
            self.scales()
                .fac
                .calc(&subgrid.node_values(), self.kinematics())
                .iter(),
        );
        fac1.sort_by(f64::total_cmp);
        fac1.dedup_by(subgrid::node_value_eq_ref_mut);
        frg1.extend(
            self.scales()
                .frg
                .calc(&subgrid.node_values(), self.kinematics())
                .iter(),
        );
        frg1.sort_by(f64::total_cmp);
        frg1.dedup_by(subgrid::node_value_eq_ref_mut);
        // Momentum-fraction nodes from every X dimension of the subgrid.
        x1.extend(
            subgrid
                .node_values()
                .iter()
                .zip(self.kinematics())
                .filter(|(_, kin)| matches!(kin, Kinematics::X(_)))
                .flat_map(|(nv, _)| nv),
        );
        x1.sort_by(f64::total_cmp);
        x1.dedup_by(subgrid::node_value_eq_ref_mut);
        // PIDs from every convolution slot of this channel.
        for (index, _) in self.convolutions().iter().enumerate() {
            pids1.extend(channel.entry().iter().map(|(pids, _)| pids[index]));
        }
        pids1.sort_unstable();
        pids1.dedup();
    }
    EvolveInfo {
        fac1,
        frg1,
        pids1,
        x1,
        ren1,
    }
}
/// Evolve this grid with the given evolution-operator slices into an [`FkTable`].
///
/// Each element of `slices` iterates over `(operator info, operator)` pairs for one
/// convolution; the iterators are advanced in lock-step so corresponding slices are
/// processed together. `order_mask` selects which orders to evolve, `xi` holds the
/// scale-variation factors and `alphas_table` provides the strong coupling.
///
/// # Errors
/// Fails when operator metadata is inconsistent (mixed PID bases, mismatching operator
/// dimensions, incompatible target scales), when no operator exists for one of the
/// grid's convolution types or scales, or when no evolution was performed at all.
pub fn evolve<
    'a,
    E: Into<anyhow::Error>,
    S: IntoIterator<Item = std::result::Result<(OperatorSliceInfo, CowArray<'a, f64, Ix4>), E>>,
>(
    &self,
    slices: Vec<S>,
    order_mask: &[bool],
    xi: (f64, f64, f64),
    alphas_table: &AlphasTable,
) -> Result<FkTable> {
    // Helper iterator that advances several iterators in lock-step, yielding one item
    // from each per step and stopping as soon as any of them is exhausted.
    struct Iter<T> {
        iters: Vec<T>,
    }
    impl<T: Iterator> Iterator for Iter<T> {
        type Item = Vec<T::Item>;
        fn next(&mut self) -> Option<Self::Item> {
            self.iters.iter_mut().map(Iterator::next).collect()
        }
    }
    fn zip_n<O, T>(iters: O) -> impl Iterator<Item = Vec<T::Item>>
    where
        O: IntoIterator<Item = T>,
        T: IntoIterator,
    {
        Iter {
            iters: iters.into_iter().map(IntoIterator::into_iter).collect(),
        }
    }
    let mut result: Option<Self> = None;
    // Maps each of the grid's convolutions to the index of the matching operator.
    let mut eko_map = Vec::new();
    // Target scales shared by all slices; slot 0 is "fac", slot 1 is "frg".
    let mut scales0 = [None, None];
    // Source scales covered by the operators processed so far, per slot.
    let mut op_scales1 = [Vec::new(), Vec::new()];
    let names = ["fac", "frg"];
    let EvolveInfo {
        fac1: grid_fac1,
        frg1: grid_frg1,
        ..
    } = self.evolve_info(order_mask);
    // The scales the grid actually needs, including the variation factors squared.
    let grid_scales1: [Vec<_>; 2] = [
        grid_fac1.into_iter().map(|fac| xi.1 * xi.1 * fac).collect(),
        grid_frg1.into_iter().map(|frg| xi.2 * xi.2 * frg).collect(),
    ];
    for slice in zip_n(slices) {
        let (infos, operators): (Vec<_>, Vec<_>) = slice
            .into_iter()
            .map(|res| res.map_err(|err| Error::Other(err.into())))
            .collect::<Result<_>>()?;
        // All operators of a slice must share one PID basis.
        let pid_basis = infos[0].pid_basis;
        if !infos.iter().all(|info| info.pid_basis == pid_basis) {
            return Err(Error::General(
                "the EKOs' PID bases are not all equal".to_owned(),
            ));
        }
        let mut scales1 = [None, None];
        for (info, operator) in infos.iter().zip(&operators) {
            // The operator must be shaped (pids1, x1, pids0, x0) as advertised.
            let dim_op_info = (
                info.pids1.len(),
                info.x1.len(),
                info.pids0.len(),
                info.x0.len(),
            );
            if operator.dim() != dim_op_info {
                return Err(Error::General(format!(
                    "operator information {dim_op_info:?} does not match the operator's dimensions: {:?}",
                    operator.dim()
                )));
            }
            // PDF-type operators fill slot 0, all others slot 1.
            let idx = usize::from(!info.conv_type.is_pdf());
            for (scale, op_scale) in [
                (&mut scales0[idx], info.fac0),
                (&mut scales1[idx], info.fac1),
            ] {
                if let &mut Some(scale) = scale {
                    // All slices must agree on the shared scale values.
                    if !approx_eq!(f64, scale, op_scale, ulps = 8) {
                        return Err(Error::General(format!(
                            "EKO slice's {0}{idx} = '{op_scale}' is incompatible with previous slices' {0}{idx} = '{scale}'",
                            names[idx],
                        )));
                    }
                } else {
                    *scale = Some(op_scale);
                }
            }
        }
        // Determine once which operator belongs to which grid convolution.
        if eko_map.is_empty() {
            let eko_conv_types: Vec<_> = infos.iter().map(|info| info.conv_type).collect();
            eko_map = self
                .convolutions()
                .iter()
                .map(|conv| {
                    eko_conv_types
                        .iter()
                        .position(|&eko_conv_type| eko_conv_type == conv.conv_type())
                        .ok_or_else(|| {
                            Error::General(format!(
                                "no EKO for convolution type `{conv:?}` found"
                            ))
                        })
                })
                .collect::<Result<_>>()?;
        }
        // Record which of the grid's scales this slice covers, skipping duplicates and
        // scales the grid does not contain.
        for ((&scale1, op_scales1), grid_scales1) in
            scales1.iter().zip(&mut op_scales1).zip(&grid_scales1)
        {
            if let Some(scale1) = scale1 {
                if op_scales1
                    .iter()
                    .any(|&s| subgrid::node_value_eq(s, scale1))
                {
                    continue;
                }
                if !grid_scales1
                    .iter()
                    .any(|&s| subgrid::node_value_eq(s, scale1))
                {
                    continue;
                }
                op_scales1.push(scale1);
            }
        }
        // Reorder operators/infos so they line up with the grid's convolutions.
        let operators: Vec<_> = eko_map.iter().map(|&idx| operators[idx].view()).collect();
        let infos: Vec<_> = eko_map.iter().map(|&idx| infos[idx].clone()).collect();
        // Derive the evolved grid's scale forms from which target scales were seen.
        let (fac, frg, scale_values) = match scales0 {
            [None, None] => unreachable!(),
            [Some(fac0), None] => (ScaleFuncForm::Scale(0), ScaleFuncForm::NoScale, vec![fac0]),
            [None, Some(frg0)] => (ScaleFuncForm::NoScale, ScaleFuncForm::Scale(0), vec![frg0]),
            [Some(fac0), Some(frg0)] => {
                if approx_eq!(f64, fac0, frg0, ulps = 8) {
                    // Equal scales share a single scale dimension.
                    (ScaleFuncForm::Scale(0), ScaleFuncForm::Scale(0), vec![fac0])
                } else {
                    (
                        ScaleFuncForm::Scale(0),
                        ScaleFuncForm::Scale(1),
                        vec![fac0, frg0],
                    )
                }
            }
        };
        let (subgrids, channels) = evolution::evolve_slice(
            self,
            &operators,
            &infos,
            &scale_values,
            order_mask,
            xi,
            alphas_table,
        )?;
        // Each evolved slice is itself a grid with a single leading order.
        let evolved_slice = Self {
            subgrids,
            bwfl: self.bwfl().clone(),
            orders: vec![Order::new(0, 0, 0, 0, 0)],
            channels,
            pid_basis,
            convolutions: self.convolutions.clone(),
            interps: self.interps.clone(),
            kinematics: (0..scale_values.len())
                .map(Kinematics::Scale)
                .chain(
                    self.kinematics
                        .iter()
                        .filter(|kin| matches!(kin, Kinematics::X(_)))
                        .copied(),
                )
                .collect(),
            scales: Scales {
                ren: ScaleFuncForm::NoScale,
                fac,
                frg,
            },
            metadata: self.metadata.clone(),
            more_members: self.more_members.clone(),
            reference: self.reference.clone(),
        };
        // Accumulate all evolved slices into a single grid.
        if let Some(result) = &mut result {
            result.merge(evolved_slice)?;
        } else {
            result = Some(evolved_slice);
        }
    }
    // Every scale the grid needs must have been covered by some operator slice.
    for ((grid_scales1, op_scales1), name) in grid_scales1.iter().zip(&op_scales1).zip(&names) {
        if let Some(scale1) = grid_scales1
            .iter()
            .find(|&&a| !op_scales1.iter().any(|&b| subgrid::node_value_eq(a, b)))
        {
            return Err(Error::General(format!(
                "no operator for {name}1 = '{scale1}' found"
            )));
        }
    }
    let result =
        result.ok_or_else(|| Error::General("no evolution was performed".to_owned()))?;
    Ok(FkTable::try_from(result).unwrap_or_else(|_| unreachable!()))
}
pub fn delete_bins(&mut self, bin_indices: &[usize]) {
let mut bin_indices: Vec<_> = bin_indices
.iter()
.copied()
.filter(|&index| index < self.bwfl().len())
.collect();
bin_indices.sort_unstable();
bin_indices.dedup();
let bin_indices = bin_indices;
for &bin_index in bin_indices.iter().rev() {
self.subgrids.remove_index(Axis(1), bin_index);
self.bwfl.remove(bin_index);
}
}
pub fn rotate_pid_basis(&mut self, pid_basis: PidBasis) {
let self_pid_basis = *self.pid_basis();
for channel in &mut self.channels {
*channel = self_pid_basis.translate(pid_basis, channel.clone());
}
self.pid_basis = pid_basis;
}
pub fn delete_channels(&mut self, channel_indices: &[usize]) {
let mut channel_indices: Vec<_> = channel_indices
.iter()
.copied()
.filter(|&index| index < self.channels().len())
.collect();
channel_indices.sort_unstable();
channel_indices.dedup();
channel_indices.reverse();
let channel_indices = channel_indices;
for index in channel_indices {
self.channels.remove(index);
self.subgrids.remove_index(Axis(2), index);
}
}
/// Delete the perturbative orders listed in `order_indices`; duplicate and
/// out-of-range indices are silently ignored.
pub fn delete_orders(&mut self, order_indices: &[usize]) {
    let limit = self.orders().len();
    let mut indices: Vec<_> = order_indices
        .iter()
        .copied()
        .filter(|&index| index < limit)
        .collect();
    indices.sort_unstable();
    indices.dedup();
    // Remove from the back so earlier indices stay valid.
    while let Some(index) = indices.pop() {
        self.orders.remove(index);
        self.subgrids.remove_index(Axis(0), index);
    }
}
pub fn split_channels(&mut self) {
let indices: Vec<_> = self
.channels()
.iter()
.enumerate()
.flat_map(|(index, entry)| iter::repeat(index).take(entry.entry().len()))
.collect();
self.subgrids = self.subgrids.select(Axis(2), &indices);
self.channels = self
.channels()
.iter()
.flat_map(|entry| {
entry
.entry()
.iter()
.cloned()
.map(move |entry| Channel::new(vec![entry]))
})
.collect();
}
/// Fold each channel's overall constant factor into its subgrids.
// NOTE(review): assumes `Channel::factor` returns `(factor, channel without that
// factor)` — TODO confirm against its definition.
pub fn merge_channel_factors(&mut self) {
    let (factors, new_channels): (Vec<_>, Vec<_>) =
        self.channels().iter().map(Channel::factor).unzip();
    // Scale every subgrid of channel `i` by `factors[i]`.
    for (mut subgrids_bo, &factor) in self.subgrids.axis_iter_mut(CHANNEL_AXIS).zip(&factors) {
        subgrids_bo.map_inplace(|subgrid| subgrid.scale(factor));
    }
    self.channels = new_channels;
}
/// Return a new grid in which convolution number `conv_idx` has been carried out with
/// the function `xfx`, called as `xfx(pid, x, mu2)` and interpreted as returning
/// `x * f(x, mu2)` (the result is divided by `x`). `xi` is the scale-variation factor
/// applied to the squared scale at which `xfx` is evaluated.
///
/// # Errors
/// Returns an error when the grid has only one convolution or when `conv_idx` is out
/// of bounds.
///
/// # Panics
/// Panics when no `Kinematics::X(conv_idx)` variable exists for the fixed convolution.
pub fn fix_convolution(
    &self,
    conv_idx: usize,
    xfx: &mut dyn FnMut(i32, f64, f64) -> f64,
    xi: f64,
) -> Result<Self> {
    if self.convolutions.len() <= 1 {
        return Err(Error::General(
            "cannot fix the last convolution".to_string(),
        ));
    }
    if conv_idx >= self.convolutions.len() {
        return Err(Error::General(format!(
            "convolution index {} out of bounds (max: {})",
            conv_idx,
            self.convolutions.len() - 1
        )));
    }
    // The fixed convolution disappears from the new grid.
    let mut new_convolutions = self.convolutions.clone();
    new_convolutions.remove(conv_idx);
    let mut new_kinematics = self.kinematics.clone();
    let mut new_interps = self.interps.clone();
    // Drop the momentum-fraction dimension belonging to the fixed convolution ...
    let kin_pos = new_kinematics
        .iter()
        .position(|k| *k == Kinematics::X(conv_idx))
        .unwrap();
    new_kinematics.remove(kin_pos);
    new_interps.remove(kin_pos);
    // ... and shift the indices of the remaining X variables down to close the gap.
    for kin in &mut new_kinematics {
        if let Kinematics::X(i) = kin {
            if *i > conv_idx {
                *i -= 1;
            }
        }
    }
    // Group the old channel entries by their remaining PIDs. Each group records where
    // the entry came from, which PID was fixed and the entry's factor.
    let mut new_channel_map: BTreeMap<Vec<i32>, Vec<(usize, i32, f64)>> = BTreeMap::new();
    for (ichan, chan) in self.channels().iter().enumerate() {
        for (pids, factor) in chan.entry() {
            let mut new_pids = pids.clone();
            let fixed_pid = new_pids.remove(conv_idx);
            new_channel_map
                .entry(new_pids)
                .or_default()
                .push((ichan, fixed_pid, *factor));
        }
    }
    let new_channels: Vec<Channel> = new_channel_map
        .keys()
        .map(|pids| Channel::new(vec![(pids.clone(), 1.0)]))
        .collect();
    let new_channel_pids: Vec<_> = new_channel_map.keys().cloned().collect();
    let conv_to_fix = &self.convolutions[conv_idx];
    // Fixing a convolution zeroes some log exponents, which can make previously
    // distinct orders coincide; deduplicate them and build an old-to-new index map.
    let (new_orders, order_map) = {
        let mut unique_orders = Vec::new();
        let other_conv_idx_opt = if self.convolutions.len() == 2 {
            Some(1 - conv_idx)
        } else {
            None
        };
        let map: Vec<usize> = self
            .orders
            .iter()
            .map(|o| {
                let mut new_o = o.clone();
                if conv_to_fix.conv_type().is_ff() {
                    // Fixing a fragmentation function removes log(xi_a) dependence.
                    new_o.logxia = 0;
                } else if let Some(other_conv_idx) = other_conv_idx_opt {
                    if conv_to_fix.conv_type().is_pdf()
                        && self.convolutions[other_conv_idx].conv_type().is_ff()
                    {
                        new_o.logxif = 0;
                    }
                }
                unique_orders
                    .iter()
                    .position(|uo| uo == &new_o)
                    .unwrap_or_else(|| {
                        unique_orders.push(new_o);
                        unique_orders.len() - 1
                    })
            })
            .collect();
        (unique_orders, map)
    };
    let mut new_subgrids: Array3<SubgridEnum> = Array3::from_shape_simple_fn(
        (new_orders.len(), self.bwfl.len(), new_channels.len()),
        || EmptySubgridV1.into(),
    );
    for (inew_chan, new_pids) in new_channel_pids.iter().enumerate() {
        let origins = &new_channel_map[new_pids];
        (0..self.orders().len()).for_each(|iord| {
            for ibin in 0..self.bwfl().bins().len() {
                let mut intermediate_sg: Option<SubgridEnum> = None;
                // Accumulate all original channel entries mapping to this new channel.
                for &(ichan_orig, pid_fixed, factor) in origins {
                    let sg_orig = &self.subgrids[[iord, ibin, ichan_orig]];
                    if sg_orig.is_empty() {
                        continue;
                    }
                    // New node values: the original ones minus the fixed X dimension.
                    let mut new_node_values = sg_orig.node_values();
                    new_node_values.remove(kin_pos);
                    let mut sg_new_array =
                        PackedArray::new(new_node_values.iter().map(Vec::len).collect());
                    // PDFs are evaluated at the factorization scale, everything else
                    // at the fragmentation scale.
                    let scale_form = if conv_to_fix.conv_type().is_pdf() {
                        &self.scales.fac
                    } else {
                        &self.scales.frg
                    };
                    for (mut idxs_orig, val_orig) in sg_orig.indexed_iter() {
                        // Split off the index along the fixed X dimension.
                        let x_val = idxs_orig.remove(kin_pos);
                        let sg_orig_node_values = sg_orig.node_values();
                        let self_kinematics = self.kinematics();
                        // Sizes of the scale dimensions, needed to locate the scale
                        // node for this entry.
                        let scale_dims: Vec<_> = sg_orig_node_values
                            .iter()
                            .enumerate()
                            .filter(|(i, _)| {
                                matches!(self_kinematics.get(*i), Some(Kinematics::Scale(_)))
                            })
                            .map(|(_, v)| v.len())
                            .collect();
                        let mu2_nodes_calc =
                            scale_form.calc(&sg_orig_node_values, self_kinematics);
                        let mu2_idx = scale_form.idx(&idxs_orig, &scale_dims);
                        // Apply the variation factor to the squared scale.
                        let mu2_val = mu2_nodes_calc[mu2_idx] * xi * xi;
                        let x = sg_orig_node_values[kin_pos][x_val];
                        // `xfx` returns x*f(x); divide by x to obtain f(x).
                        let pdf_val = xfx(pid_fixed, x, mu2_val) / x;
                        let final_val = val_orig * factor * pdf_val;
                        sg_new_array[idxs_orig.as_slice()] += final_val;
                    }
                    let sg_contrib: SubgridEnum =
                        ImportSubgridV1::new(sg_new_array, new_node_values).into();
                    if let Some(ref mut isg) = intermediate_sg {
                        isg.merge(&sg_contrib, None);
                    } else {
                        intermediate_sg = Some(sg_contrib);
                    }
                }
                if let Some(sg) = intermediate_sg {
                    // Route the contribution through the order deduplication map.
                    let new_iord = order_map[iord];
                    new_subgrids[[new_iord, ibin, inew_chan]].merge(&sg, None);
                }
            }
        });
    }
    let mut new_grid = Self::new(
        self.bwfl.clone(),
        new_orders,
        new_channels,
        *self.pid_basis(),
        new_convolutions,
        new_interps,
        new_kinematics,
        self.scales.clone(),
    );
    new_grid.subgrids = new_subgrids;
    new_grid.metadata = self.metadata.clone();
    // Clean up artifacts of the reduction: empty orders/channels and channels that
    // became proportional to each other.
    new_grid.optimize_using(
        GridOptFlags::STRIP_EMPTY_ORDERS
            | GridOptFlags::STRIP_EMPTY_CHANNELS
            | GridOptFlags::MERGE_SAME_CHANNELS,
    );
    Ok(new_grid)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::boc::ScaleFuncForm;
use crate::channel;
use crate::convolutions::ConvType;
use crate::interpolation::Map;
use float_cmp::assert_approx_eq;
use std::fs::File;
// Checks the interpolation maps returned for a grid built with the default
// interpolations: one scale variable plus two momentum fractions.
#[test]
fn interpolations() {
    let grid = Grid::new(
        BinsWithFillLimits::from_fill_limits([0.0, 1.0].to_vec()).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![Channel::new(vec![(vec![1, -1], 1.0), (vec![2, -2], 1.0)])],
        PidBasis::Pdg,
        vec![
            Conv::new(ConvType::UnpolPDF, 2212),
            Conv::new(ConvType::UnpolPDF, 2212),
        ],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    let interps = grid.interpolations();
    assert!(matches!(interps[0].map(), Map::ApplGridH0));
    assert!(matches!(interps[1].map(), Map::ApplGridF2));
    assert!(matches!(interps[2].map(), Map::ApplGridF2));
}
#[test]
#[should_panic(expected = "channel #0 has wrong number of PIDs: expected 2, found 3")]
fn grid_new_panic0() {
    // Each channel entry lists three PIDs, but only two convolutions are
    // supplied, so `Grid::new` must panic with a descriptive message.
    let entries = vec![(vec![1, -1, 1], 1.0), (vec![2, -2, 2], 1.0)];
    let _ = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.0, 1.0]).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![Channel::new(entries)],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
}
#[test]
#[should_panic(expected = "interps and kinematics have different lengths: 2 vs. 3")]
fn grid_new_panic1() {
    // Only two interpolations are requested (`default_interps(false, 1)`)
    // while three kinematic variables are given; `Grid::new` must panic.
    let entries = vec![(vec![1, -1], 1.0), (vec![2, -2], 1.0)];
    let _ = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.0, 1.0]).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![Channel::new(entries)],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 1),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
}
#[test]
#[should_panic(expected = "scales and kinematics are not compatible")]
fn grid_new_panic2() {
    // The factorization scale references `Scale(1)`, which none of the
    // kinematics provides, so `Grid::new` must panic.
    let entries = vec![(vec![1, -1], 1.0), (vec![2, -2], 1.0)];
    let _ = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.0, 1.0]).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![Channel::new(entries)],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(1),
            frg: ScaleFuncForm::NoScale,
        },
    );
}
#[test]
fn grid_read_file_version_unsupported() {
    // Build a header with the correct magic bytes but an unsupported file
    // version (99, stored as a little-endian u64 after the magic).
    let mut header = Vec::new();
    header.extend_from_slice(b"PineAPPL");
    header.extend_from_slice(&99_u64.to_le_bytes());
    let result = Grid::read(header.as_slice());
    assert!(
        matches!(result, Err(Error::General(msg)) if msg == "file version 99 is not supported")
    );
}
#[test]
fn grid_merge_empty_subgrids() {
    // Merging a grid that was never filled must leave the bins, channels and
    // orders of the left-hand side untouched.
    let limits = vec![0.0, 0.25, 0.5, 0.75, 1.0];
    let mut grid = Grid::new(
        BinsWithFillLimits::from_fill_limits(limits.clone()).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![
            channel![1.0 * (2, 2) + 1.0 * (4, 4)],
            channel![1.0 * (1, 1) + 1.0 * (3, 3)],
        ],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    assert_eq!(grid.bwfl().len(), 4);
    assert_eq!(grid.channels().len(), 2);
    assert_eq!(grid.orders().len(), 1);
    // The right-hand side has different orders and swapped channels, but all
    // of its subgrids are empty.
    let rhs = Grid::new(
        BinsWithFillLimits::from_fill_limits(limits).unwrap(),
        vec![Order::new(1, 2, 0, 0, 0), Order::new(1, 2, 0, 1, 0)],
        vec![
            channel![1.0 * (1, 1) + 1.0 * (3, 3)],
            channel![1.0 * (2, 2) + 1.0 * (4, 4)],
        ],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    grid.merge(rhs).unwrap();
    assert_eq!(grid.bwfl().len(), 4);
    assert_eq!(grid.channels().len(), 2);
    assert_eq!(grid.orders().len(), 1);
}
#[test]
fn grid_repair() {
    // NOTE(review): the former function-local
    // `use super::super::packed_array::PackedArray;` was redundant —
    // `PackedArray` is already imported at file level and reaches this module
    // through `use super::*` — so it has been removed.
    let mut grid = Grid::new(
        BinsWithFillLimits::from_fill_limits([0.0, 0.25, 0.5, 0.75, 1.0].to_vec()).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![channel![1.0 * (2, 2) + 1.0 * (4, 4)]],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    // A freshly constructed grid needs no repair.
    let was_repaired = grid.repair();
    assert!(!was_repaired);
    let x = vec![
        0.015625, 0.03125, 0.0625, 0.125, 0.1875, 0.25, 0.375, 0.5, 0.75, 1.0,
    ];
    // Inject an `ImportSubgridV1` whose packed array holds no non-zero entry;
    // `repair` must detect this inconsistency and report that it fixed it.
    let mut ar = PackedArray::new(vec![1, 10, 10]);
    ar[[0, 0, 0]] = 0.;
    let sg: SubgridEnum = ImportSubgridV1::new(ar, vec![vec![0.0], x.clone(), x]).into();
    grid.subgrids_mut()[[0, 0, 0]] = sg;
    let was_repaired = grid.repair();
    assert!(was_repaired);
}
#[test]
fn grid_merge_orders() {
    // Merging a grid with two additional perturbative orders (and one shared
    // one) must grow the order list from one to three.
    let mut grid = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.0, 0.25, 0.5, 0.75, 1.0]).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![
            channel![1.0 * (2, 2) + 1.0 * (4, 4)],
            channel![1.0 * (1, 1) + 1.0 * (3, 3)],
        ],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    assert_eq!(grid.bwfl().len(), 4);
    assert_eq!(grid.channels().len(), 2);
    assert_eq!(grid.orders().len(), 1);
    let mut rhs = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.0, 0.25, 0.5, 0.75, 1.0]).unwrap(),
        vec![
            Order::new(1, 2, 0, 0, 0),
            Order::new(1, 2, 0, 1, 0),
            Order::new(0, 2, 0, 0, 0),
        ],
        vec![
            channel![1.0 * (2, 2) + 1.0 * (4, 4)],
            channel![1.0 * (1, 1) + 1.0 * (3, 3)],
        ],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    // Fill both channels of the first two orders so they survive the merge.
    let q2 = 90.0_f64.powi(2);
    for (order, channel, weight) in [(0, 0, 1.0), (0, 1, 2.0), (1, 0, 1.0), (1, 1, 2.0)] {
        rhs.fill(order, 0.1, channel, &[q2, 0.1, 0.2], weight);
    }
    grid.merge(rhs).unwrap();
    assert_eq!(grid.bwfl().len(), 4);
    assert_eq!(grid.channels().len(), 2);
    assert_eq!(grid.orders().len(), 3);
}
#[test]
fn grid_merge_channels_entries() {
    // Merging a grid that brings one genuinely new channel (and one that the
    // left-hand side already has) must grow the channel list from 2 to 3.
    let mut grid = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.0, 0.25, 0.5, 0.75, 1.0]).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![
            channel![1.0 * (2, 2) + 1.0 * (4, 4)],
            channel![1.0 * (1, 1) + 1.0 * (3, 3)],
        ],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    assert_eq!(grid.bwfl().len(), 4);
    assert_eq!(grid.channels().len(), 2);
    assert_eq!(grid.orders().len(), 1);
    let mut rhs = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.0, 0.25, 0.5, 0.75, 1.0]).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![
            // photon-photon channel: not present in `grid`
            channel![1.0 * (22, 22)],
            channel![1.0 * (2, 2) + 1.0 * (4, 4)],
        ],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    // Fill only the new channel so that it is non-empty after the merge.
    rhs.fill(0, 0.1, 0, &[90.0_f64.powi(2), 0.1, 0.2], 3.0);
    grid.merge(rhs).unwrap();
    assert_eq!(grid.bwfl().len(), 4);
    assert_eq!(grid.channels().len(), 3);
    assert_eq!(grid.orders().len(), 1);
}
#[test]
fn grid_merge_bins() {
    // Two grids covering adjacent observable ranges ([0, 0.5] and [0.5, 1])
    // are concatenated by `merge` into a single four-bin grid.
    let mut grid = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.0, 0.25, 0.5]).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![
            channel![1.0 * (2, 2) + 1.0 * (4, 4)],
            channel![1.0 * (1, 1) + 1.0 * (3, 3)],
        ],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    assert_eq!(grid.bwfl().len(), 2);
    assert_eq!(grid.channels().len(), 2);
    assert_eq!(grid.orders().len(), 1);
    // Same channels in swapped order: `merge` must match them by content.
    let mut rhs = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.5, 0.75, 1.0]).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        vec![
            channel![1.0 * (1, 1) + 1.0 * (3, 3)],
            channel![1.0 * (2, 2) + 1.0 * (4, 4)],
        ],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    let q2 = 90.0_f64.powi(2);
    rhs.fill(0, 0.1, 0, &[q2, 0.1, 0.2], 2.0);
    rhs.fill(0, 0.1, 1, &[q2, 0.1, 0.2], 3.0);
    grid.merge(rhs).unwrap();
    assert_eq!(grid.bwfl().len(), 4);
    assert_eq!(grid.channels().len(), 2);
    assert_eq!(grid.orders().len(), 1);
}
#[test]
fn grid_merge_channel_factors() {
    // After `merge_channel_factors` every channel-entry factor should have
    // been absorbed into the subgrids, leaving all factors equal to 1.
    let mut grid = Grid::new(
        BinsWithFillLimits::from_fill_limits([0.0, 1.0].to_vec()).unwrap(),
        vec![Order::new(0, 2, 0, 0, 0)],
        // non-trivial factors (0.5 and 2.5) that must be merged away
        vec![Channel::new(vec![(vec![1, -1], 0.5), (vec![2, -2], 2.5)])],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    grid.merge_channel_factors();
    // BUG FIX: the boolean produced by `all` was previously discarded, so
    // this test could never fail; assert the condition instead.
    assert!(grid.channels().iter().all(|channel| {
        channel
            .entry()
            .iter()
            .all(|(_, fac)| (*fac - 1.0).abs() < f64::EPSILON)
    }));
}
#[test]
fn grid_convolutions() {
    // `convolutions` must reflect the constructor arguments, and
    // `convolutions_mut` must allow replacing them in place.
    let mut grid = Grid::new(
        BinsWithFillLimits::from_fill_limits(vec![0.0, 1.0]).unwrap(),
        vec![Order::new(0, 0, 0, 0, 0)],
        vec![channel![1.0 * (21, 21)]],
        PidBasis::Pdg,
        vec![Conv::new(ConvType::UnpolPDF, 2212); 2],
        v0::default_interps(false, 2),
        vec![Kinematics::Scale(0), Kinematics::X(0), Kinematics::X(1)],
        Scales {
            ren: ScaleFuncForm::Scale(0),
            fac: ScaleFuncForm::Scale(0),
            frg: ScaleFuncForm::NoScale,
        },
    );
    // Initially both convolutions describe an unpolarized proton PDF.
    let proton = Conv::new(ConvType::UnpolPDF, 2212);
    assert_eq!(grid.convolutions(), [proton.clone(), proton]);
    // Flip both convolutions to an antiproton through the mutable accessor.
    for conv in grid.convolutions_mut() {
        *conv = Conv::new(ConvType::UnpolPDF, -2212);
    }
    let antiproton = Conv::new(ConvType::UnpolPDF, -2212);
    assert_eq!(grid.convolutions(), [antiproton.clone(), antiproton]);
}
#[test]
fn evolve_info() {
    // Regression test: `evolve_info` on a stored reference grid must report
    // the known scales, PIDs and x-grid nodes.
    let grid =
        Grid::read(File::open("../test-data/LHCB_WP_7TEV_opt.pineappl.lz4").unwrap()).unwrap();
    let info = grid.evolve_info(&[]);
    // A single factorization scale, shared with the renormalization scale.
    assert_eq!(info.fac1.len(), 1);
    assert_approx_eq!(f64, info.fac1[0], 6456.443904000001, ulps = 64);
    assert_eq!(info.ren1.len(), 1);
    assert_approx_eq!(f64, info.ren1[0], 6456.443904000001, ulps = 64);
    assert_eq!(info.pids1, [-3, -1, 2, 4, 21, 22]);
    // Reference x-grid nodes, in ascending order up to x = 1.
    let x1_ref = [
        1.9602505002391748e-5,
        2.97384953722449e-5,
        4.511438394964044e-5,
        6.843744918967896e-5,
        0.00010381172986576898,
        0.00015745605600841445,
        0.00023878782918561914,
        0.00036205449638139736,
        0.0005487795323670796,
        0.0008314068836488144,
        0.0012586797144272762,
        0.0019034634022867384,
        0.0028738675812817515,
        0.004328500638820811,
        0.006496206194633799,
        0.009699159574043398,
        0.014375068581090129,
        0.02108918668378717,
        0.030521584007828916,
        0.04341491741702269,
        0.060480028754447364,
        0.08228122126204893,
        0.10914375746330703,
        0.14112080644440345,
        0.17802566042569432,
        0.2195041265003886,
        0.2651137041582823,
        0.31438740076927585,
        0.3668753186482242,
        0.4221667753589648,
        0.4798989029610255,
        0.5397572337880445,
        0.601472197967335,
        0.6648139482473823,
        0.7295868442414312,
        0.7956242522922756,
        0.8627839323906108,
        0.9309440808717544,
        1.0,
    ];
    assert_eq!(info.x1.len(), x1_ref.len());
    for (&node, &reference) in info.x1.iter().zip(x1_ref.iter()) {
        assert_approx_eq!(f64, node, reference, ulps = 64);
    }
}
}