use serde_json::map::Entry;
use super::*;
use crate::algebra::matrices::operations::transform_vector_wise::PutbackIteratorMatrix;
use crate::algebra::vectors::entries::{KeyValSet, KeyValNew};
use crate::algebra::matrices::types::matching::{GeneralizedMatchingMatrixWithSequentialOrder};
use crate::algebra::matrices::types::vec_of_vec::sorted::VecOfVec;
use crate::algebra::matrices::query::{MatrixOracle, MatrixAlgebra};
use crate::utilities::iterators::general::{SkipUntil, IterWrappedVec};
use crate::utilities::iterators::merge::hit::{IteratorsMergedInSortedOrder, hit_bulk_insert};
use crate::algebra::vectors::entries::{KeyValPair};
use crate::algebra::rings::traits::{SemiringOperations, RingOperations, DivisionRingOperations};
use crate::utilities::order::{JudgePartialOrder, ReverseOrder};
use crate::algebra::vectors::operations::{Scale, VectorOperations, Simplify, OnlyIndicesInsideCollection, LinearCombinationSimplified};
use std::cmp::Ordering;
use std::collections::HashMap;
use std::hash::Hash;
use std::fmt::Debug;
use std::iter::Peekable;
use std::vec::IntoIter;
/// Converts one entry of the inverse target COMB into a scaled, truncated row of
/// the matrix to factor.
///
/// Given an entry `(ordinal, coefficient)` of (a row of) the inverse target COMB,
/// this fetches the row of `matrix_to_factor` whose row index is matched to
/// `ordinal` in `matching`, scales every entry by `scale_factor * coefficient`,
/// and skips leading entries until the predicate
/// `truncation_limit < entry` (in the matrix's row-entry order) first holds
/// (per `skip_until`'s convention — NOTE(review): confirm whether the first
/// passing entry is kept).
///
/// The result is peekable so callers can merge it into a heap of sorted iterators.
fn target_comb_entry_to_scaled_truncated_row_of_matrix_to_factor<
MatrixToFactor,
>
(
// entry (ordinal, coefficient) of the inverse target COMB
target_comb_inv_entry: ( usize, MatrixToFactor::Coefficient ),
// extra multiplier applied on top of the entry's own coefficient
scale_factor: MatrixToFactor::Coefficient,
// entries are skipped until this limit strictly precedes them
truncation_limit: & MatrixToFactor::RowEntry,
matrix_to_factor: & MatrixToFactor,
// bijection between matched row indices, column indices, and ordinals
matching: & GeneralizedMatchingMatrixWithSequentialOrder< MatrixToFactor::ColumnIndex, MatrixToFactor::RowIndex, MatrixToFactor::Coefficient >,
)
->
Peekable<
Scale <
MatrixToFactor::Row, MatrixToFactor::RingOperator,
>,
>
where
MatrixToFactor: MatrixAlgebra<
ColumnIndex: Hash, RowIndex: Hash, RowEntry: KeyValPair,
ColumnEntry: KeyValPair,
RingOperator: DivisionRingOperations,
>,
{
let ring_operator = matrix_to_factor.ring_operator();
let order_operator_for_row_entries = matrix_to_factor.order_operator_for_row_entries();
let ( target_comb_inv_entry_index, target_comb_inv_entry_coeff ) = target_comb_inv_entry;
// Fold the COMB entry's coefficient into the caller-supplied scale factor.
let scale_factor_aggregate = ring_operator.multiply( scale_factor, target_comb_inv_entry_coeff );
// Fetch the matched row, scale it, and skip entries up to the truncation limit.
matrix_to_factor .row(
& matching.row_index_for_ordinal( target_comb_inv_entry_index )
)
.into_iter()
.scale_by( scale_factor_aggregate, ring_operator )
.peekable()
.skip_until( |x| {
// true exactly when `truncation_limit` strictly precedes `x`
let this = &order_operator_for_row_entries; this.judge_partial_cmp(truncation_limit, x) == Some(Ordering::Less) } )
}
/// Performs one round of row elimination, appending the off-diagonal entries of
/// the next row of the inverse target COMB to `target_comb_inv_off_diag_row_buffer`.
///
/// Repeatedly pops the leading entry from `entries_to_eliminate_simplified_heap`:
///
/// * If the entry's column index is matched (i.e. has an ordinal in `matching`),
///   the entry is eliminated: scaled copies of the corresponding matched row of
///   `matrix_to_factor` — and of every row recorded for that ordinal in
///   `array_target_comb_inv_off_diag` — are bulk-inserted into the heap, truncated
///   past the entry being eliminated, and the scale factors used are pushed into
///   the buffer.
/// * Otherwise the entry cannot be eliminated; it is returned as `Some(entry)`.
///
/// Returns `None` if the heap empties without hitting an uneliminable entry.
/// Note the buffer is appended to, not cleared — callers clear it between rows.
fn try_writing_next_row_of_target_comb_to_buffer<
MatrixToFactor,
>
(
// output: (ordinal, coefficient) pairs of the new COMB row (unsorted, unsimplified)
target_comb_inv_off_diag_row_buffer: &mut Vec< ( usize, MatrixToFactor::Coefficient ) >,
// heap of scaled row iterators, merged in sorted order and simplified on the fly
entries_to_eliminate_simplified_heap: & mut Simplify <
IteratorsMergedInSortedOrder <
Peekable<
Scale <
MatrixToFactor::Row, MatrixToFactor::RingOperator, >,
>,
MatrixToFactor::OrderOperatorForRowEntries
>,
MatrixToFactor::RingOperator,
>,
matrix_to_factor: & MatrixToFactor,
// matched (row index, column index, coefficient) triples found so far
matching: & GeneralizedMatchingMatrixWithSequentialOrder< MatrixToFactor::ColumnIndex, MatrixToFactor::RowIndex, MatrixToFactor::Coefficient >,
// previously computed off-diagonal COMB rows, indexed by ordinal
array_target_comb_inv_off_diag: & Vec< Vec< (usize, MatrixToFactor::Coefficient) > >, )
->
Option< MatrixToFactor::RowEntry >
where
MatrixToFactor: MatrixAlgebra<
ColumnIndex: Hash, RowIndex: Hash, RowEntry: KeyValPair,
ColumnEntry: KeyValPair,
RingOperator: DivisionRingOperations,
>,
{
let ring_operator = matrix_to_factor.ring_operator();
while let Some( leading_entry_to_eliminate ) = entries_to_eliminate_simplified_heap.next() {
// Matched column index => this entry can be zeroed out by a scaled row addition.
if let Some( ordinal ) = matching.ordinal_for_column_index( & leading_entry_to_eliminate.key() ) {
// scale factor = -( leading coefficient / matched pivot coefficient ),
// chosen so the addition cancels the leading entry exactly
let scale_factor = ring_operator.negate(
ring_operator.divide(
leading_entry_to_eliminate.val(),
matching.coefficient_for_ordinal( ordinal ),
)
);
// Insert the scaled elimination rows into the heap: the stored off-diagonal
// COMB row for `ordinal`, plus an implicit diagonal entry (ordinal, 1).
hit_bulk_insert(
&mut entries_to_eliminate_simplified_heap.unsimplified,
array_target_comb_inv_off_diag[ ordinal ]
.iter()
.cloned()
.chain( std::iter::once(
( ordinal, MatrixToFactor::RingOperator::one() )
)
)
.map(
| target_comb_inv_entry |
{
target_comb_entry_to_scaled_truncated_row_of_matrix_to_factor(
target_comb_inv_entry,
scale_factor.clone(),
& leading_entry_to_eliminate, matrix_to_factor, matching, )
}
)
);
// Record the elimination in the output buffer: the diagonal contribution
// plus a scaled copy of the stored off-diagonal row.
target_comb_inv_off_diag_row_buffer.push( ( ordinal, scale_factor.clone() ) );
target_comb_inv_off_diag_row_buffer
.extend(
array_target_comb_inv_off_diag[ ordinal ]
.iter()
.cloned() .scale_by( scale_factor, ring_operator.clone() )
);
} else {
// Unmatched column index: elimination stops here; hand the entry back.
return Some( leading_entry_to_eliminate );
}
}
None
}
/// Computes the pivot block of the inverse target COMB with its diagonal deleted,
/// together with the generalized matching matrix, by eliminating each row supplied
/// by `iter_row_index` against the rows matched so far.
///
/// Rows are stored by ordinal; after the main loop the row order is reversed and
/// every entry's key `k` is remapped to `len - 1 - k`, with the matching reversed
/// to stay consistent.
pub fn get_pivot_block_of_target_comb_inverse_with_deleted_diagonal< MatrixToFactor, IterRowIndex >
(
    matrix_to_factor: &MatrixToFactor, iter_row_index: IterRowIndex,
)
->
(
    VecOfVec< usize, MatrixToFactor::Coefficient >,
    GeneralizedMatchingMatrixWithSequentialOrder< MatrixToFactor::ColumnIndex, MatrixToFactor::RowIndex, MatrixToFactor::Coefficient >,
)
where
    MatrixToFactor: MatrixAlgebra<
        ColumnIndex: Hash, RowIndex: Hash, RowEntry: KeyValPair,
        ColumnEntry: KeyValPair,
        RingOperator: DivisionRingOperations,
    >,
    IterRowIndex: Iterator< Item = MatrixToFactor::RowIndex >,
{
    let ring_operator = matrix_to_factor.ring_operator();
    let order_operator_for_row_entries = matrix_to_factor.order_operator_for_row_entries();

    // Heap of scaled row iterators, merged in row-entry order and simplified lazily.
    let mut entries_to_eliminate_simplified_heap
        = IteratorsMergedInSortedOrder::new( order_operator_for_row_entries.clone() )
            .simplify( ring_operator.clone() );
    // Matched (row index, column index, coefficient) triples discovered so far.
    let mut matching = GeneralizedMatchingMatrixWithSequentialOrder::new();
    // Off-diagonal rows of the inverse target COMB, one inner Vec per match.
    let mut array_target_comb_inv_off_diag: Vec< Vec< ( usize, MatrixToFactor::Coefficient ) > > = Vec::new();

    // Scratch buffers reused across iterations to avoid repeated allocation.
    let mut target_comb_inv_off_diag_row_buffer = Vec::new();
    let mut target_comb_inv_off_diag_row_buffer_simplified = Vec::new();

    for row_index in iter_row_index {
        // Seed the heap with this row of the matrix, scaled by the ring's one.
        entries_to_eliminate_simplified_heap.unsimplified.clear();
        entries_to_eliminate_simplified_heap.unsimplified.insert_one_iter(
            matrix_to_factor
                .row( & row_index )
                .scale_by( MatrixToFactor::RingOperator::one(), ring_operator.clone() )
                .peekable()
        );

        target_comb_inv_off_diag_row_buffer.clear();
        let leading_entry_uneliminable_opt = try_writing_next_row_of_target_comb_to_buffer(
            & mut target_comb_inv_off_diag_row_buffer,
            & mut entries_to_eliminate_simplified_heap,
            matrix_to_factor,
            & matching,
            & array_target_comb_inv_off_diag,
        );

        // A surviving leading entry becomes a new pivot; record the match and
        // the new off-diagonal COMB row. Rows that eliminate to zero are dropped.
        if let Some( leading_entry_uneliminable ) = leading_entry_uneliminable_opt {
            let pivot_column_index = leading_entry_uneliminable.key();
            let pivot_coeff = leading_entry_uneliminable.val();
            matching.push( row_index, pivot_column_index, pivot_coeff );

            // Sort the buffered entries by ordinal (descending), then combine
            // consecutive entries with equal keys via `simplify`.
            target_comb_inv_off_diag_row_buffer.sort_by( |a, b| b.0.cmp( &a.0 ) );
            target_comb_inv_off_diag_row_buffer_simplified.clear();
            target_comb_inv_off_diag_row_buffer_simplified.extend(
                target_comb_inv_off_diag_row_buffer
                    .iter()
                    .cloned()
                    .peekable()
                    .simplify( ring_operator.clone() )
            );

            // Move the simplified entries into a right-sized vector of their own.
            let mut vec_to_push = Vec::with_capacity( target_comb_inv_off_diag_row_buffer_simplified.len() );
            vec_to_push.extend( target_comb_inv_off_diag_row_buffer_simplified.drain( .. ) );
            array_target_comb_inv_off_diag.push( vec_to_push );
        }
    }

    // No matches at all: return the empty block directly (avoids `len - 1` underflow).
    if array_target_comb_inv_off_diag.is_empty() {
        return ( VecOfVec::new( array_target_comb_inv_off_diag ).ok().unwrap(), matching );
    }

    // Reverse the row order and remap each entry's key k to (len - 1 - k),
    // keeping the matching consistent with the new ordering.
    array_target_comb_inv_off_diag.shrink_to_fit();
    array_target_comb_inv_off_diag.reverse();
    let num_matched_pairs_minus_one = array_target_comb_inv_off_diag.len() - 1;
    for row_vec in array_target_comb_inv_off_diag.iter_mut() {
        for entry in row_vec.iter_mut() {
            entry.set_key( num_matched_pairs_minus_one - entry.key() );
        }
    }
    matching.reverse_order_of_matches();

    ( VecOfVec::new( array_target_comb_inv_off_diag ).ok().unwrap(), matching )
}
/// Like [`get_pivot_block_of_target_comb_inverse_with_deleted_diagonal`], but
/// (i) skips any `row_index` that is already matched as a *column* index, and
/// (ii) short-circuits rows whose leading entry is the bottom entry of its
/// column — such rows need no elimination, so an empty off-diagonal row is
/// recorded for them.
///
/// Returns the off-diagonal pivot block of the inverse target COMB (rows indexed
/// by ordinal, in reversed match order) together with the generalized matching
/// matrix.
///
/// Cleanup relative to the previous revision: removed the write-only
/// `sc_counter` local and the unused `order_operator_for_row_indices` binding.
pub fn target_comb_inv_off_diag_pivot_block_skipmatched< MatrixToFactor, IterRowIndex, IndexForRowsAndColumns, EntryForRowsAndColumns, Coefficient >
(
    matrix_to_factor: &MatrixToFactor, iter_row_index: IterRowIndex,
)
->
(
    VecOfVec< usize, MatrixToFactor::Coefficient >,
    GeneralizedMatchingMatrixWithSequentialOrder< MatrixToFactor::ColumnIndex, MatrixToFactor::RowIndex, MatrixToFactor::Coefficient >,
)
where
    IndexForRowsAndColumns: Clone + Debug + Eq + Hash, EntryForRowsAndColumns: PartialEq + KeyValPair< Key=IndexForRowsAndColumns, Val=Coefficient >,
    MatrixToFactor: MatrixAlgebra<
        ColumnIndex= IndexForRowsAndColumns, RowIndex= IndexForRowsAndColumns, RowEntry= EntryForRowsAndColumns,
        ColumnEntry= EntryForRowsAndColumns,
        RingOperator: DivisionRingOperations< Element = Coefficient >, Coefficient= Coefficient, >
        + MatrixOracleOperations,
    IterRowIndex: IntoIterator< Item = MatrixToFactor::RowIndex >,
    Coefficient: Clone + Debug + PartialEq,
{
    let ring_operator = matrix_to_factor.ring_operator();
    let order_operator_for_row_entries = matrix_to_factor.order_operator_for_row_entries();

    // Wrap the matrix so a row's leading entry can be peeked and put back.
    let matrix_to_factor_with_putback = PutbackIteratorMatrix::new( matrix_to_factor );

    // Heap of scaled row iterators, merged in row-entry order and simplified lazily.
    let mut entries_to_eliminate_simplified_heap
        = IteratorsMergedInSortedOrder::new( order_operator_for_row_entries.clone() )
            .simplify( ring_operator.clone() );
    // Matched (row index, column index, coefficient) triples discovered so far.
    let mut matching = GeneralizedMatchingMatrixWithSequentialOrder::new();
    // Off-diagonal rows of the inverse target COMB, one inner Vec per match.
    let mut array_target_comb_inv_off_diag: Vec< Vec< ( usize, MatrixToFactor::Coefficient ) > > = Vec::new();

    // Scratch buffers reused across iterations to avoid repeated allocation.
    let mut target_comb_inv_off_diag_row_buffer = Vec::new();
    let mut target_comb_inv_off_diag_row_buffer_simplified = Vec::new();

    for row_index in iter_row_index {
        // Skip rows whose index has already been consumed as a matched column index.
        if matching.has_a_match_for_column_index( & row_index ) {
            continue
        }

        entries_to_eliminate_simplified_heap.unsimplified.clear();

        // Fast path: when the row's leading entry is the bottom entry of its
        // column, the row needs no elimination; match it with an empty
        // off-diagonal COMB row. Otherwise put the entry back and fall through.
        let mut next_iter = matrix_to_factor_with_putback.row( & row_index );
        if let Some( leading_entry ) = next_iter.next() {
            if matrix_to_factor
                .bottom_entry_for_column( &leading_entry.key() )
                .as_ref()
                == Some( &leading_entry )
            {
                matching.push( row_index, leading_entry.key(), leading_entry.val() );
                array_target_comb_inv_off_diag.push( vec![] );
                continue
            } else {
                next_iter.put_back( leading_entry );
            }
        }

        // Seed the heap with the (possibly put-back) row, scaled by the ring's one.
        entries_to_eliminate_simplified_heap.unsimplified.insert_one_iter(
            next_iter
                .into_iter()
                .scale_by( MatrixToFactor::RingOperator::one(), ring_operator.clone() )
                .peekable()
        );

        target_comb_inv_off_diag_row_buffer.clear();
        let leading_entry_uneliminable_opt = try_writing_next_row_of_target_comb_to_buffer(
            & mut target_comb_inv_off_diag_row_buffer,
            & mut entries_to_eliminate_simplified_heap,
            & matrix_to_factor_with_putback,
            & matching,
            & array_target_comb_inv_off_diag,
        );

        // A surviving leading entry becomes a new pivot; record the match and
        // the new off-diagonal COMB row. Rows that eliminate to zero are dropped.
        if let Some( leading_entry_uneliminable ) = leading_entry_uneliminable_opt {
            let pivot_column_index = leading_entry_uneliminable.key();
            let pivot_coeff = leading_entry_uneliminable.val();
            matching.push( row_index, pivot_column_index, pivot_coeff );

            // Sort the buffered entries by ordinal (descending), then combine
            // consecutive entries with equal keys via `simplify`.
            target_comb_inv_off_diag_row_buffer.sort_by( |a, b| b.0.cmp( &a.0 ) );
            target_comb_inv_off_diag_row_buffer_simplified.clear();
            target_comb_inv_off_diag_row_buffer_simplified.extend(
                target_comb_inv_off_diag_row_buffer
                    .iter()
                    .cloned()
                    .peekable()
                    .simplify( ring_operator.clone() )
            );

            // Move the simplified entries into a right-sized vector of their own.
            let mut vec_to_push = Vec::with_capacity( target_comb_inv_off_diag_row_buffer_simplified.len() );
            vec_to_push.extend( target_comb_inv_off_diag_row_buffer_simplified.drain( .. ) );
            array_target_comb_inv_off_diag.push( vec_to_push );
        }
    }

    // No matches at all: return the empty block directly (avoids `len - 1` underflow).
    if array_target_comb_inv_off_diag.is_empty() {
        return ( VecOfVec::new( array_target_comb_inv_off_diag ).ok().unwrap(), matching );
    }

    // Reverse the row order and remap each entry's key k to (len - 1 - k),
    // keeping the matching consistent with the new ordering.
    array_target_comb_inv_off_diag.shrink_to_fit();
    array_target_comb_inv_off_diag.reverse();
    let num_matched_pairs_minus_one = array_target_comb_inv_off_diag.len() - 1;
    for row_vec in array_target_comb_inv_off_diag.iter_mut() {
        for entry in row_vec.iter_mut() {
            entry.set_key( num_matched_pairs_minus_one - entry.key() );
        }
    }
    matching.reverse_order_of_matches();

    ( VecOfVec::new( array_target_comb_inv_off_diag ).ok().unwrap(), matching )
}
/// Lazy matrix view over the matched block of
/// (inverse target COMB) * (matrix to factor), with rows addressed by the
/// *column indices* of the matrix to factor rather than by match ordinals
/// (the `MatrixOracle` impl translates each column index to its ordinal via
/// the matching before delegating).
///
/// Holds only a shared reference to the underlying U-match factorization,
/// so the struct is `Copy`.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct TargetCombInverseTimesMatrixToFactorMatchedBlockRowsIndexedByColumnIndex< 'a, MatrixToFactor >
where
MatrixToFactor: MatrixAlgebra<
ColumnIndex: Hash, RowIndex: Hash, RowEntry: KeyValPair,
ColumnEntry: KeyValPair,
RingOperator: DivisionRingOperations,
>,
{
// Shared reference to the U-match factorization this view reads from.
pub umatch: &'a Umatch< MatrixToFactor > ,
}
impl < 'a, MatrixToFactor >
    TargetCombInverseTimesMatrixToFactorMatchedBlockRowsIndexedByColumnIndex
        < 'a, MatrixToFactor >
    where
        MatrixToFactor: MatrixAlgebra<
            ColumnIndex: Hash, RowIndex: Hash, RowEntry: KeyValPair,
            ColumnEntry: KeyValPair,
            RingOperator: DivisionRingOperations,
        >,
{
    /// Builds the view from a shared reference to a U-match factorization.
    pub fn new( umatch_ref: &'a Umatch< MatrixToFactor > ) -> Self {
        Self { umatch: umatch_ref }
    }
}
// Marker impl: equality behavior comes entirely from the derived `PartialEq`
// (which compares the stored `umatch` reference); `Eq` merely asserts that the
// relation is total, which requires the matrix type to be `PartialEq` and its
// coefficients to be `Eq`.
impl < 'a, MatrixToFactor, >
Eq for
TargetCombInverseTimesMatrixToFactorMatchedBlockRowsIndexedByColumnIndex
< 'a, MatrixToFactor >
where
MatrixToFactor: MatrixAlgebra<
ColumnIndex: Hash, RowIndex: Hash, RingOperator: DivisionRingOperations,
RowEntry: KeyValPair,
ColumnEntry: KeyValPair,
>,
MatrixToFactor: PartialEq,
MatrixToFactor::Coefficient: Eq,
{}
// `MatrixOracle` implementation: every lookup translates the caller's column
// index to its match ordinal, then delegates to the ordinal-indexed block
// `matched_blocks_of_target_comb_inverse_times_matrix_to_factor_oc`.
//
// NOTE(review): index-translation methods `unwrap()` the ordinal lookup, so
// passing an unmatched index panics — callers should check
// `has_row_for_index` first.
impl < 'a, MatrixToFactor >
    MatrixOracle for
    TargetCombInverseTimesMatrixToFactorMatchedBlockRowsIndexedByColumnIndex
        < 'a, MatrixToFactor >
    where
        MatrixToFactor: MatrixAlgebra<
            ColumnIndex: Hash, RowIndex: Hash, RowEntry: KeyValPair,
            ColumnEntry: KeyValPair,
            RingOperator: DivisionRingOperations,
        >,
{
    type Coefficient = MatrixToFactor::Coefficient;
    // Rows and columns of this view are both indexed by the factored matrix's
    // column indices, and both entry types are its row-entry type.
    type RowIndex = MatrixToFactor::ColumnIndex;
    type ColumnIndex = MatrixToFactor::ColumnIndex;
    type RowEntry = MatrixToFactor::RowEntry ;
    type ColumnEntry = MatrixToFactor::RowEntry ;
    type Row = LinearCombinationSimplified<
        OnlyIndicesInsideCollection< MatrixToFactor::Row, &'a HashMap<MatrixToFactor::ColumnIndex, usize> >,
        MatrixToFactor::RingOperator,
        MatrixToFactor::OrderOperatorForRowEntries
    >;
    type RowReverse = IntoIter< MatrixToFactor::RowEntry >;
    type Column = IntoIter< MatrixToFactor::RowEntry >;
    type ColumnReverse = IterWrappedVec< MatrixToFactor::RowEntry >;

    fn structural_nonzero_entry( & self, row: & Self::RowIndex, column: & Self::ColumnIndex ) -> Option< Self::Coefficient >
    {
        // Translate the row's column index into its ordinal, then defer to the
        // ordinal-indexed block.
        let row_ordinal = self.umatch
            .generalized_matching_matrix_ref()
            .ordinal_for_column_index( row )
            .unwrap();
        self.umatch
            .matched_blocks_of_target_comb_inverse_times_matrix_to_factor_oc()
            .structural_nonzero_entry( &row_ordinal, column )
    }

    // Both row and column membership reduce to "is this index matched?".
    fn has_column_for_index( & self, index: & Self::ColumnIndex) -> bool
    { self.umatch.generalized_matching_matrix_ref().has_a_match_for_column_index( index ) }
    fn has_row_for_index( & self, index: & Self::RowIndex) -> bool
    { self.umatch.generalized_matching_matrix_ref().has_a_match_for_column_index( index ) }

    fn row( & self, index: & MatrixToFactor::ColumnIndex ) -> Self::Row
    {
        let ordinal = self.umatch.matching.ordinal_for_column_index( &index ).unwrap();
        let proxy = self.umatch.matched_blocks_of_target_comb_inverse_times_matrix_to_factor_oc();
        proxy.row( & ordinal )
    }

    // Reversed row: materialize the forward row, then flip it in place.
    fn row_reverse( &self, index: &Self::RowIndex ) -> Self::RowReverse {
        let mut entries = self.row( index ).collect_vec();
        entries.reverse();
        entries.into_iter()
    }

    // Forward column: materialize the reversed column, then flip it in place.
    fn column( &self, index: &Self::ColumnIndex ) -> Self::Column {
        let mut entries = self.column_reverse( index ).collect_vec();
        entries.reverse();
        entries.into_iter()
    }

    fn column_reverse( &self, index: &Self::ColumnIndex ) -> Self::ColumnReverse
    {
        // Pull the column in ordinal coordinates and re-key each entry with the
        // column index matched to its ordinal.
        let mut entries = self.umatch
            .matched_blocks_of_target_comb_inverse_times_matrix_to_factor_oc()
            .column_reverse( & index )
            .map(
                |x|
                MatrixToFactor::RowEntry::new(
                    self.umatch.matching.column_index_for_ordinal( x.0 ),
                    x.1
                )
            )
            .collect_vec();
        entries.shrink_to_fit();

        // Sort in the *reverse* of the row-entry order, as the method's
        // contract requires.
        let order_decider = ReverseOrder::new( self.umatch.order_operator_for_row_entries() );
        entries.sort_by( |x: &MatrixToFactor::RowEntry, y: &MatrixToFactor::RowEntry| order_decider.judge_partial_cmp( x, y ).unwrap() );
        IterWrappedVec::new( entries )
    }
}
// `MatrixAlgebra` implementation: pure delegation. Because both rows and columns
// of this view are indexed by the factored matrix's *column* indices, both the
// row-index and column-index order operators delegate to
// `order_operator_for_column_indices`, and both entry order operators delegate
// to `order_operator_for_row_entries`.
impl < 'a, MatrixToFactor >
MatrixAlgebra for
TargetCombInverseTimesMatrixToFactorMatchedBlockRowsIndexedByColumnIndex
< 'a, MatrixToFactor >
where
MatrixToFactor: MatrixAlgebra<
ColumnIndex: Hash, RowIndex: Hash, RowEntry: KeyValPair,
ColumnEntry: KeyValPair,
RingOperator: DivisionRingOperations,
>,
{
type OrderOperatorForColumnEntries = MatrixToFactor::OrderOperatorForRowEntries;
type RingOperator = MatrixToFactor::RingOperator;
type OrderOperatorForRowEntries = MatrixToFactor::OrderOperatorForRowEntries;
type OrderOperatorForRowIndices = MatrixToFactor::OrderOperatorForColumnIndices;
type OrderOperatorForColumnIndices = MatrixToFactor::OrderOperatorForColumnIndices;
// Ring operator comes from the U-match wrapper itself.
fn ring_operator( &self ) -> Self::RingOperator {
self.umatch.ring_operator()
}
// All order operators come straight from the underlying matrix to factor.
fn order_operator_for_row_entries( &self ) -> Self::OrderOperatorForRowEntries {
self.umatch.matrix_to_factor.order_operator_for_row_entries()
}
fn order_operator_for_row_indices( &self ) -> Self::OrderOperatorForRowIndices {
self.umatch.matrix_to_factor.order_operator_for_column_indices()
}
fn order_operator_for_column_entries( &self ) -> Self::OrderOperatorForColumnEntries {
self.umatch.matrix_to_factor.order_operator_for_row_entries()
}
fn order_operator_for_column_indices( &self ) -> Self::OrderOperatorForColumnIndices {
self.umatch.matrix_to_factor.order_operator_for_column_indices()
}
}