use calculo::num::Sign;
use fixedbitset::FixedBitSet;
use std::fmt::Debug;
// Legacy raw-index aliases; prefer the typed `RowId`/`ColId` wrappers below.
// NOTE(review): `Big` looks like a cddlib-style alias and is unused in this
// chunk — confirm intended meaning/width at the call sites.
pub type Big = usize;
pub type Row = usize;
pub type Col = usize;
/// Strongly-typed row index, preventing accidental mix-ups with column indices.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct RowId(pub usize);

impl RowId {
    /// Wraps a raw index as a typed row identifier.
    pub fn new(index: usize) -> Self {
        RowId(index)
    }

    /// Unwraps back to the underlying raw index.
    pub fn as_index(self) -> usize {
        let RowId(index) = self;
        index
    }
}
impl From<usize> for RowId {
    /// Lets plain indices be used wherever a `RowId` is expected.
    fn from(value: usize) -> Self {
        RowId::new(value)
    }
}

impl From<RowId> for usize {
    /// Recovers the raw index from a typed row identifier.
    fn from(value: RowId) -> Self {
        value.as_index()
    }
}
impl std::fmt::Display for RowId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl PartialEq<usize> for RowId {
fn eq(&self, other: &usize) -> bool {
&self.0 == other
}
}
impl PartialEq<RowId> for usize {
fn eq(&self, other: &RowId) -> bool {
self == &other.0
}
}
impl std::ops::Add<usize> for RowId {
    type Output = RowId;

    /// Offsets the row index by `rhs`.
    ///
    /// # Panics
    /// Panics with "RowId overflow" on arithmetic overflow, mirroring the
    /// explicit underflow check in the matching `Sub` impl. Previously the
    /// unchecked `+` would silently wrap in release builds and yield a
    /// bogus `RowId`.
    fn add(self, rhs: usize) -> Self::Output {
        RowId(self.0.checked_add(rhs).expect("RowId overflow"))
    }
}
impl std::ops::Sub<usize> for RowId {
    type Output = RowId;

    /// Decrements the row index by `rhs`.
    ///
    /// # Panics
    /// Panics with "RowId underflow" when `rhs` exceeds the index.
    fn sub(self, rhs: usize) -> Self::Output {
        RowId(self.0.checked_sub(rhs).expect("RowId underflow"))
    }
}
/// Strongly-typed column index, the column-side twin of `RowId`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ColId(pub usize);

impl ColId {
    /// Wraps a raw index as a typed column identifier.
    pub fn new(index: usize) -> Self {
        ColId(index)
    }

    /// Unwraps back to the underlying raw index.
    pub fn as_index(self) -> usize {
        let ColId(index) = self;
        index
    }
}
impl From<usize> for ColId {
    /// Lets plain indices be used wherever a `ColId` is expected.
    fn from(value: usize) -> Self {
        ColId::new(value)
    }
}

impl From<ColId> for usize {
    /// Recovers the raw index from a typed column identifier.
    fn from(value: ColId) -> Self {
        value.as_index()
    }
}
impl std::fmt::Display for ColId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl PartialEq<usize> for ColId {
fn eq(&self, other: &usize) -> bool {
&self.0 == other
}
}
impl PartialEq<ColId> for usize {
fn eq(&self, other: &ColId) -> bool {
self == &other.0
}
}
impl std::ops::Add<usize> for ColId {
    type Output = ColId;

    /// Offsets the column index by `rhs`.
    ///
    /// # Panics
    /// Panics with "ColId overflow" on arithmetic overflow, mirroring the
    /// explicit underflow check in the matching `Sub` impl. Previously the
    /// unchecked `+` would silently wrap in release builds and yield a
    /// bogus `ColId`.
    fn add(self, rhs: usize) -> Self::Output {
        ColId(self.0.checked_add(rhs).expect("ColId overflow"))
    }
}
impl std::ops::Sub<usize> for ColId {
    type Output = ColId;

    /// Decrements the column index by `rhs`.
    ///
    /// # Panics
    /// Panics with "ColId underflow" when `rhs` exceeds the index.
    fn sub(self, rhs: usize) -> Self::Output {
        ColId(self.0.checked_sub(rhs).expect("ColId underflow"))
    }
}
/// Number of bits in one bitset block (one machine word).
pub(crate) const WORD_BITS: usize = usize::BITS as usize;

/// Mask selecting the bits of the final block that fall inside `dimension`.
///
/// When `dimension` is an exact multiple of the word size (including zero),
/// every bit of the last block is in range and the full mask is returned.
#[inline(always)]
pub fn trailing_mask(dimension: usize) -> usize {
    match dimension % WORD_BITS {
        0 => usize::MAX,
        partial => (1usize << partial) - 1,
    }
}
/// Returns a copy of `bits` truncated to `dimension` bits.
///
/// Copies whole blocks, then clears any bits of the final block that fall
/// at or beyond `dimension`, preserving the invariant that out-of-range
/// bits are always zero.
fn shrink_bits(bits: &FixedBitSet, dimension: usize) -> FixedBitSet {
    let mut shrunk = FixedBitSet::with_capacity(dimension);
    let destination = shrunk.as_mut_slice();
    let source = bits.as_slice();
    // The new set may have fewer blocks; copy only the overlap.
    let shared = destination.len().min(source.len());
    destination[..shared].copy_from_slice(&source[..shared]);
    mask_trailing_bits(destination, dimension);
    shrunk
}
/// Clears any bits of the last block that lie at or beyond `dimension`.
pub(crate) fn mask_trailing_bits(blocks: &mut [usize], dimension: usize) {
    let mask = trailing_mask(dimension);
    // A full mask means the final word is entirely in range: nothing to clear.
    if mask != usize::MAX {
        if let Some(last) = blocks.last_mut() {
            *last &= mask;
        }
    }
}
/// Index of the lowest zero bit below `dimension`, or `None` if all bits
/// in range are set.
#[inline]
fn first_unset_bit(blocks: &[usize], dimension: usize) -> Option<usize> {
    if dimension == 0 {
        return None;
    }
    let mask = trailing_mask(dimension);
    let last = blocks.len().saturating_sub(1);
    blocks
        .iter()
        .enumerate()
        .find_map(|(word, &bits)| {
            // Invert so zeros become ones; restrict the last word to
            // in-range bits only.
            let zeros = if word == last { !bits & mask } else { !bits };
            (zeros != 0).then(|| word * WORD_BITS + zeros.trailing_zeros() as usize)
        })
        // Defensive bound; candidates are already within range by masking.
        .filter(|&idx| idx < dimension)
}
/// Index of the highest zero bit below `dimension`, or `None` if all bits
/// in range are set.
#[inline]
fn last_unset_bit(blocks: &[usize], dimension: usize) -> Option<usize> {
    if dimension == 0 {
        return None;
    }
    let mask = trailing_mask(dimension);
    let last = blocks.len().saturating_sub(1);
    blocks
        .iter()
        .enumerate()
        .rev()
        .find_map(|(word, &bits)| {
            // Invert so zeros become ones; restrict the last word to
            // in-range bits only.
            let zeros = if word == last { !bits & mask } else { !bits };
            (zeros != 0)
                .then(|| word * WORD_BITS + (WORD_BITS - 1 - zeros.leading_zeros() as usize))
        })
        // Defensive bound; candidates are already within range by masking.
        .filter(|&idx| idx < dimension)
}
/// Checks that two bitsets share the same dimension.
///
/// NOTE(review): despite the `assert_` name this is a `debug_assert`, so the
/// check is compiled out of release builds — mismatched dimensions there fall
/// through to whatever the underlying FixedBitSet operation does.
fn assert_same_dimension(left: &FixedBitSet, right: &FixedBitSet) {
    debug_assert_eq!(left.len(), right.len());
}
/// Panics with a descriptive message when `idx` is outside `0..len`.
/// `kind` names the calling operation for the panic message.
#[inline]
fn assert_index_in_range(kind: &'static str, idx: usize, len: usize) {
    if idx >= len {
        panic!("{kind}: index {idx} out of bounds (len {len})");
    }
}
/// A fixed-dimension set of row indices backed by a `FixedBitSet`.
///
/// Invariant: bits at or beyond the dimension are kept zero (maintained by
/// `shrink_bits` on resize), so block-level comparisons and counts are exact.
#[derive(Debug, Eq, PartialEq)]
pub struct RowSet {
    bits: FixedBitSet,
}

impl AsRef<RowSet> for RowSet {
    #[inline(always)]
    fn as_ref(&self) -> &RowSet {
        self
    }
}

// Manual `Clone` so `clone_from` can reuse the existing allocation.
impl Clone for RowSet {
    fn clone(&self) -> Self {
        Self {
            bits: self.bits.clone(),
        }
    }

    // `copy_from` resizes only when needed, then does a block memcpy.
    fn clone_from(&mut self, source: &Self) {
        self.copy_from(source);
    }
}
impl RowSet {
    /// Creates an empty set over rows `0..dimension`.
    pub fn new(dimension: usize) -> Self {
        Self {
            bits: FixedBitSet::with_capacity(dimension),
        }
    }

    /// Creates a set containing every row in `0..dimension`.
    pub fn all(dimension: usize) -> Self {
        let mut bits = FixedBitSet::with_capacity(dimension);
        bits.set_range(.., true);
        Self { bits }
    }

    /// Dimension of the set (number of addressable rows), NOT the member
    /// count — see `cardinality` for that.
    pub fn len(&self) -> usize {
        self.bits.len()
    }

    /// Changes the dimension. Growing preserves all members; shrinking
    /// drops members at or beyond the new dimension.
    pub fn resize(&mut self, dimension: usize) {
        if dimension == self.bits.len() {
            return;
        }
        if dimension > self.bits.len() {
            self.bits.grow(dimension);
            return;
        }
        self.bits = shrink_bits(&self.bits, dimension);
    }

    /// Makes `self` an exact copy of `other` (dimension and members),
    /// reusing the existing allocation when block counts already match.
    pub fn copy_from(&mut self, other: &Self) {
        self.resize(other.len());
        debug_assert_eq!(
            self.bits.as_slice().len(),
            other.bits.as_slice().len(),
            "RowSet::copy_from requires matching dimensions"
        );
        self.bits
            .as_mut_slice()
            .copy_from_slice(other.bits.as_slice());
    }

    /// True when the set has no members (the dimension may be non-zero).
    pub fn is_empty(&self) -> bool {
        self.bits.is_clear()
    }

    /// Removes all members; the dimension is unchanged.
    pub fn clear(&mut self) {
        self.bits.clear();
    }

    /// Adds `row` to the set. Panics if the index is outside `0..len()`.
    pub fn insert<T: Into<RowId>>(&mut self, row: T) {
        let idx = row.into().as_index();
        assert_index_in_range("RowSet::insert", idx, self.bits.len());
        self.bits.insert(idx);
    }

    /// `insert` for a raw `usize` index.
    pub fn insert_raw(&mut self, idx: usize) {
        self.insert(RowId::new(idx));
    }

    /// Removes `row` from the set. Panics if the index is outside `0..len()`.
    pub fn remove<T: Into<RowId>>(&mut self, row: T) {
        let idx = row.into().as_index();
        assert_index_in_range("RowSet::remove", idx, self.bits.len());
        self.bits.remove(idx);
    }

    /// `contains` for a raw `usize` index.
    pub fn contains_raw(&self, idx: usize) -> bool {
        self.contains(RowId::new(idx))
    }

    /// Membership test. Panics if the index is outside `0..len()`.
    pub fn contains<T: Into<RowId>>(&self, row: T) -> bool {
        let idx = row.into().as_index();
        assert_index_in_range("RowSet::contains", idx, self.bits.len());
        self.bits.contains(idx)
    }

    /// Returns `self ∩ other`. Dimensions must match (debug-checked).
    pub fn intersection(&self, other: &Self) -> Self {
        assert_same_dimension(&self.bits, &other.bits);
        let mut bits = self.bits.clone();
        bits.intersect_with(&other.bits);
        Self { bits }
    }

    /// In-place `self ∩= other`, avoiding an allocation.
    pub fn intersection_inplace(&mut self, other: &Self) {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.intersect_with(&other.bits);
    }

    /// Returns `self ∪ other`. Dimensions must match (debug-checked).
    pub fn union(&self, other: &Self) -> Self {
        assert_same_dimension(&self.bits, &other.bits);
        let mut bits = self.bits.clone();
        bits.union_with(&other.bits);
        Self { bits }
    }

    /// In-place `self ∪= other`, avoiding an allocation.
    pub fn union_inplace(&mut self, other: &Self) {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.union_with(&other.bits);
    }

    /// Returns `self \ other`. Dimensions must match (debug-checked).
    pub fn difference(&self, other: &Self) -> Self {
        assert_same_dimension(&self.bits, &other.bits);
        let mut bits = self.bits.clone();
        bits.difference_with(&other.bits);
        Self { bits }
    }

    /// In-place `self \= other`, avoiding an allocation.
    pub fn difference_inplace(&mut self, other: &Self) {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.difference_with(&other.bits);
    }

    /// Returns the complement within `0..len()`.
    pub fn complement(&self) -> Self {
        // toggle_range(..) on an empty set is fine, but keep the explicit
        // zero-dimension short-circuit.
        if self.bits.is_empty() {
            return Self::new(0);
        }
        let mut bits = self.bits.clone();
        bits.toggle_range(..);
        Self { bits }
    }

    /// True when every member of `self` is also in `other`.
    pub fn subset_of(&self, other: &Self) -> bool {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.is_subset(&other.bits)
    }

    /// Number of members (population count).
    pub fn cardinality(&self) -> usize {
        self.bits.count_ones(..)
    }

    /// Iterates members in ascending order.
    pub fn iter(&self) -> impl Iterator<Item = RowId> + '_ {
        self.bits.ones().map(RowId)
    }

    /// Iterates non-members (within `0..len()`) in ascending order.
    pub fn iter_complement(&self) -> impl Iterator<Item = RowId> + '_ {
        self.bits.zeroes().map(RowId)
    }

    /// Lowest non-member, or `None` when the set is full.
    pub fn first_unset(&self) -> Option<RowId> {
        first_unset_bit(self.bits.as_slice(), self.bits.len()).map(RowId)
    }

    /// Highest non-member, or `None` when the set is full.
    pub fn last_unset(&self) -> Option<RowId> {
        last_unset_bit(self.bits.as_slice(), self.bits.len()).map(RowId)
    }

    /// Raw backing blocks; trailing out-of-range bits are zero.
    pub fn bit_slice(&self) -> &[usize] {
        self.bits.as_slice()
    }

    /// `|self ∩ other|` without materializing the intersection.
    pub fn count_intersection(&self, other: &Self) -> usize {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.intersection_count(&other.bits)
    }
}
/// A fixed-dimension set of column indices backed by a `FixedBitSet`;
/// the column-side twin of `RowSet`.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ColSet {
    bits: FixedBitSet,
}
impl ColSet {
    /// Creates an empty set over columns `0..dimension`.
    pub fn new(dimension: usize) -> Self {
        Self {
            bits: FixedBitSet::with_capacity(dimension),
        }
    }

    /// Creates a set containing every column in `0..dimension`.
    pub fn all(dimension: usize) -> Self {
        let mut bits = FixedBitSet::with_capacity(dimension);
        bits.set_range(.., true);
        Self { bits }
    }

    /// Dimension of the set (number of addressable columns), NOT the member
    /// count — see `cardinality` for that.
    pub fn len(&self) -> usize {
        self.bits.len()
    }

    /// Changes the dimension. Growing preserves all members; shrinking
    /// drops members at or beyond the new dimension.
    pub fn resize(&mut self, dimension: usize) {
        if dimension == self.bits.len() {
            return;
        }
        if dimension > self.bits.len() {
            self.bits.grow(dimension);
            return;
        }
        self.bits = shrink_bits(&self.bits, dimension);
    }

    /// Makes `self` an exact copy of `other` (dimension and members),
    /// reusing the existing allocation when block counts already match.
    /// Added for API parity with `RowSet::copy_from`.
    pub fn copy_from(&mut self, other: &Self) {
        self.resize(other.len());
        debug_assert_eq!(
            self.bits.as_slice().len(),
            other.bits.as_slice().len(),
            "ColSet::copy_from requires matching dimensions"
        );
        self.bits
            .as_mut_slice()
            .copy_from_slice(other.bits.as_slice());
    }

    /// True when the set has no members (the dimension may be non-zero).
    pub fn is_empty(&self) -> bool {
        self.bits.is_clear()
    }

    /// Removes all members; the dimension is unchanged.
    pub fn clear(&mut self) {
        self.bits.clear();
    }

    /// Adds `col` to the set. Panics if the index is outside `0..len()`.
    pub fn insert<T: Into<ColId>>(&mut self, col: T) {
        let idx = col.into().as_index();
        assert_index_in_range("ColSet::insert", idx, self.bits.len());
        self.bits.insert(idx);
    }

    /// `insert` for a raw `usize` index.
    pub fn insert_raw(&mut self, idx: usize) {
        self.insert(ColId::new(idx));
    }

    /// Removes `col` from the set. Panics if the index is outside `0..len()`.
    pub fn remove<T: Into<ColId>>(&mut self, col: T) {
        let idx = col.into().as_index();
        assert_index_in_range("ColSet::remove", idx, self.bits.len());
        self.bits.remove(idx);
    }

    /// `contains` for a raw `usize` index.
    pub fn contains_raw(&self, idx: usize) -> bool {
        self.contains(ColId::new(idx))
    }

    /// Membership test. Panics if the index is outside `0..len()`.
    pub fn contains<T: Into<ColId>>(&self, col: T) -> bool {
        let idx = col.into().as_index();
        assert_index_in_range("ColSet::contains", idx, self.bits.len());
        self.bits.contains(idx)
    }

    /// Returns `self ∩ other`. Dimensions must match (debug-checked).
    pub fn intersection(&self, other: &Self) -> Self {
        assert_same_dimension(&self.bits, &other.bits);
        let mut bits = self.bits.clone();
        bits.intersect_with(&other.bits);
        Self { bits }
    }

    /// In-place `self ∩= other`, avoiding an allocation.
    /// Added for API parity with `RowSet::intersection_inplace`.
    pub fn intersection_inplace(&mut self, other: &Self) {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.intersect_with(&other.bits);
    }

    /// Returns `self ∪ other`. Dimensions must match (debug-checked).
    pub fn union(&self, other: &Self) -> Self {
        assert_same_dimension(&self.bits, &other.bits);
        let mut bits = self.bits.clone();
        bits.union_with(&other.bits);
        Self { bits }
    }

    /// In-place `self ∪= other`, avoiding an allocation.
    /// Added for API parity with `RowSet::union_inplace`.
    pub fn union_inplace(&mut self, other: &Self) {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.union_with(&other.bits);
    }

    /// Returns `self \ other`. Dimensions must match (debug-checked).
    pub fn difference(&self, other: &Self) -> Self {
        assert_same_dimension(&self.bits, &other.bits);
        let mut bits = self.bits.clone();
        bits.difference_with(&other.bits);
        Self { bits }
    }

    /// In-place `self \= other`, avoiding an allocation.
    pub fn difference_inplace(&mut self, other: &Self) {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.difference_with(&other.bits);
    }

    /// Returns the complement within `0..len()`.
    pub fn complement(&self) -> Self {
        if self.bits.is_empty() {
            return Self::new(0);
        }
        let mut bits = self.bits.clone();
        bits.toggle_range(..);
        Self { bits }
    }

    /// True when every member of `self` is also in `other`.
    pub fn subset_of(&self, other: &Self) -> bool {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.is_subset(&other.bits)
    }

    /// Number of members (population count).
    pub fn cardinality(&self) -> usize {
        self.bits.count_ones(..)
    }

    /// Iterates members in ascending order.
    pub fn iter(&self) -> impl Iterator<Item = ColId> + '_ {
        self.bits.ones().map(ColId)
    }

    /// Iterates non-members (within `0..len()`) in ascending order.
    pub fn iter_complement(&self) -> impl Iterator<Item = ColId> + '_ {
        self.bits.zeroes().map(ColId)
    }

    /// Lowest non-member, or `None` when the set is full.
    pub fn first_unset(&self) -> Option<ColId> {
        first_unset_bit(self.bits.as_slice(), self.bits.len()).map(ColId)
    }

    /// Highest non-member, or `None` when the set is full.
    pub fn last_unset(&self) -> Option<ColId> {
        last_unset_bit(self.bits.as_slice(), self.bits.len()).map(ColId)
    }

    /// Raw backing blocks; trailing out-of-range bits are zero.
    /// Added for API parity with `RowSet::bit_slice`.
    pub fn bit_slice(&self) -> &[usize] {
        self.bits.as_slice()
    }

    /// `|self ∩ other|` without materializing the intersection.
    /// Added for API parity with `RowSet::count_intersection`.
    pub fn count_intersection(&self, other: &Self) -> usize {
        assert_same_dimension(&self.bits, &other.bits);
        self.bits.intersection_count(&other.bits)
    }
}
/// Per-row ordering index used by row-selection strategies.
/// NOTE(review): the meaning of negative entries is not visible in this
/// chunk — confirm at the call sites.
pub type RowIndex = Vec<isize>;

/// Strategy for choosing the next row to process.
/// NOTE(review): the variants appear to mirror cddlib's `dd_RowOrderType`
/// options — confirm against the conversion driver.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum RowOrder {
    MaxIndex,
    MinIndex,
    MinCutoff,
    MaxCutoff,
    MixCutoff,
    LexMin,
    LexMax,
    RandomRow,
}
/// Runtime tag for the two polyhedron representations: inequalities
/// (H-representation) and generators (V-representation).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum RepresentationKind {
    Inequality,
    Generator,
}

/// Marker trait tying a zero-sized representation type to its runtime kind.
pub trait Representation: Debug {
    const KIND: RepresentationKind;
}

/// Type-level marker for the inequality (H) representation.
#[derive(Clone, Copy, Debug, Default)]
pub struct Inequality;

impl Representation for Inequality {
    const KIND: RepresentationKind = RepresentationKind::Inequality;
}

/// Type-level marker for the generator (V) representation.
#[derive(Clone, Copy, Debug, Default)]
pub struct Generator;

impl Representation for Generator {
    const KIND: RepresentationKind = RepresentationKind::Generator;
}

/// Associates each representation with its dual (H ↔ V), enabling
/// type-checked conversions between the two.
pub trait DualRepresentation: Representation {
    type Dual: Representation;
}

impl DualRepresentation for Inequality {
    type Dual = Generator;
}

impl DualRepresentation for Generator {
    type Dual = Inequality;
}
/// Top-level operation requested of the solver: a representation
/// conversion, an LP solve, or an interior-point search.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Conversion {
    InequalityToGenerator,
    GeneratorToInequality,
    LpMax,
    LpMin,
    InteriorFind,
}

/// How much incidence information to emit: nothing, per-row cardinalities,
/// or the full incidence sets.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum IncidenceOutput {
    Off,
    Cardinality,
    Set,
}

/// How much adjacency information to emit: nothing, adjacency lists,
/// or just degrees.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AdjacencyOutput {
    Off,
    List,
    Degree,
}
/// Error conditions reported by the library.
/// NOTE(review): the variants appear to mirror cddlib's `dd_ErrorType`
/// codes (including the sentinel `NoError`) — confirm against callers.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Error {
    DimensionTooLarge,
    ImproperInputFormat,
    NegativeMatrixSize,
    EmptyVRepresentation,
    EmptyHRepresentation,
    EmptyRepresentation,
    InputFileNotFound,
    OutputFileNotOpen,
    NoLpObjective,
    NoRealNumberSupport,
    NotAvailableForInequality,
    NotAvailableForGenerator,
    CannotHandleLinearity,
    LpCycling,
    NumericallyInconsistent,
    NoError,
}

impl std::error::Error for Error {}

impl std::fmt::Display for Error {
    /// Human-readable description of the error.
    ///
    /// Previously this just echoed `Debug` (e.g. "LpCycling"); `Display`
    /// is user-facing, so each variant now gets a proper message.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let message = match self {
            Self::DimensionTooLarge => "dimension too large",
            Self::ImproperInputFormat => "improper input format",
            Self::NegativeMatrixSize => "negative matrix size",
            Self::EmptyVRepresentation => "empty V-representation",
            Self::EmptyHRepresentation => "empty H-representation",
            Self::EmptyRepresentation => "empty representation",
            Self::InputFileNotFound => "input file not found",
            Self::OutputFileNotOpen => "output file could not be opened",
            Self::NoLpObjective => "no LP objective given",
            Self::NoRealNumberSupport => "real number support is not available",
            Self::NotAvailableForInequality => {
                "operation not available for inequality representation"
            }
            Self::NotAvailableForGenerator => {
                "operation not available for generator representation"
            }
            Self::CannotHandleLinearity => "cannot handle linearity",
            Self::LpCycling => "LP cycling detected",
            Self::NumericallyInconsistent => "numerically inconsistent results",
            Self::NoError => "no error",
        };
        f.write_str(message)
    }
}
/// Progress state of an enumeration run.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ComputationStatus {
    InProgress,
    AllFound,
    RegionEmpty,
}

/// The relation a constraint row imposes: non-strict inequality,
/// equality, or strict inequality.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum InequalityKind {
    Inequality,
    Equality,
    StrictInequality,
}
impl InequalityKind {
    /// True when a row evaluating to `sign` violates this constraint kind.
    ///
    /// With `relaxed` set, a strict inequality also treats zero as a
    /// violation; otherwise only a negative sign violates it.
    #[inline(always)]
    pub fn violates_sign(self, sign: Sign, relaxed: bool) -> bool {
        match self {
            Self::Equality => sign != Sign::Zero,
            Self::Inequality => sign == Sign::Negative,
            Self::StrictInequality if relaxed => sign != Sign::Positive,
            Self::StrictInequality => sign == Sign::Negative,
        }
    }

    /// Like `violates_sign`, except a relaxed strict inequality never
    /// reports a violation.
    #[inline(always)]
    pub fn weakly_violates_sign(self, sign: Sign, relaxed: bool) -> bool {
        match self {
            Self::Equality => sign != Sign::Zero,
            Self::StrictInequality if relaxed => false,
            Self::Inequality | Self::StrictInequality => sign == Sign::Negative,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::{ColSet, RowSet};

    // Members and complement must exactly partition 0..70, which spans
    // more than one backing word on 64-bit targets.
    #[test]
    fn rowset_complement_iterator_covers_elements_across_blocks() {
        let mut set = RowSet::new(70);
        for idx in [0, 31, 63, 69] {
            set.insert(idx);
        }
        let members: Vec<_> = set.iter().map(usize::from).collect();
        assert_eq!(members, vec![0, 31, 63, 69]);
        let complement: Vec<_> = set.iter_complement().map(usize::from).collect();
        assert!(complement.iter().all(|&idx| !set.contains_raw(idx)));
        let mut combined = members.clone();
        combined.extend_from_slice(&complement);
        combined.sort_unstable();
        assert_eq!(combined, (0..70).collect::<Vec<_>>());
    }

    // Shrinking must drop members at or beyond the new dimension and
    // keep the cardinality consistent.
    #[test]
    fn resize_truncates_bits_past_new_length() {
        let mut set = RowSet::new(10);
        set.insert_raw(0);
        set.insert_raw(5);
        set.insert_raw(9);
        set.resize(6);
        assert_eq!(set.len(), 6);
        let members: Vec<_> = set.iter().map(usize::from).collect();
        assert_eq!(members, vec![0, 5]);
        assert_eq!(set.cardinality(), 2);
    }

    // ColSet complement must list exactly the non-members, in order.
    #[test]
    fn colset_complement_matches_expected_members() {
        let mut set = ColSet::new(4);
        set.insert_raw(1);
        set.insert_raw(3);
        let members: Vec<_> = set.iter().map(usize::from).collect();
        assert_eq!(members, vec![1, 3]);
        let complement: Vec<_> = set.iter_complement().map(usize::from).collect();
        assert_eq!(complement, vec![0, 2]);
    }

    // `all` must produce a full set of the requested dimension.
    #[test]
    fn rowset_all_creates_full_set() {
        let set = RowSet::all(10);
        assert_eq!(set.len(), 10);
        assert_eq!(set.cardinality(), 10);
        for i in 0..10 {
            assert!(set.contains_raw(i));
        }
    }

    // first_unset/last_unset must agree with the complement iterator,
    // including the full-set (None) and empty-set edge cases; dimension
    // 130 exercises the multi-word paths.
    #[test]
    fn unset_helpers_match_complement_iterators() {
        let mut set = RowSet::new(130);
        for idx in [0, 1, 64, 129] {
            set.insert_raw(idx);
        }
        assert_eq!(
            set.first_unset().map(usize::from),
            set.iter_complement().next().map(usize::from)
        );
        assert_eq!(
            set.last_unset().map(usize::from),
            set.iter_complement().last().map(usize::from)
        );
        let full = RowSet::all(10);
        assert_eq!(full.first_unset(), None);
        assert_eq!(full.last_unset(), None);
        let empty = RowSet::new(10);
        assert_eq!(empty.first_unset().map(usize::from), Some(0));
        assert_eq!(empty.last_unset().map(usize::from), Some(9));
    }
}
}