use scirs2_core::ndarray::{Array1, Array2};
use thiserror::Error;
/// Errors produced by the ADMM solvers and subproblems in this module.
#[derive(Debug, Error)]
pub enum AdmmError {
/// A vector or matrix length did not match the expected dimension.
#[error("Dimension mismatch: expected {expected}, got {got}")]
DimensionMismatch { expected: usize, got: usize },
/// `solve` was called before any subproblem was registered.
#[error("No subproblems added")]
NoSubproblems,
/// A subproblem's local solve reported a failure.
#[error("Subproblem solve failed: {0}")]
SubproblemFailed(String),
/// The iteration budget was exhausted before the residual test passed.
#[error("Maximum iterations reached without convergence")]
MaxIterationsReached,
/// A numerical issue, e.g. a non-finite value or near-singular diagonal.
#[error("Numerical error: {0}")]
NumericalError(String),
}
/// Tunable parameters for the ADMM solvers.
#[derive(Debug, Clone)]
pub struct AdmmConfig {
/// Augmented-Lagrangian penalty parameter rho (> 0).
pub rho: f32,
/// Hard cap on the number of ADMM sweeps.
pub max_iterations: usize,
/// Absolute tolerance; scaled by sqrt(n_agents * dim) in the stopping rule.
pub abs_tol: f32,
/// Relative tolerance term in the stopping rule.
pub rel_tol: f32,
/// Over-relaxation factor alpha; 1.0 disables over-relaxation.
pub over_relaxation: f32,
/// When true, per-iteration residuals are logged via `tracing::debug!`.
pub verbose: bool,
}
impl Default for AdmmConfig {
fn default() -> Self {
Self {
rho: 1.0,
max_iterations: 100,
abs_tol: 1e-4,
rel_tol: 1e-3,
over_relaxation: 1.0,
verbose: false,
}
}
}
impl AdmmConfig {
    /// Equivalent to [`AdmmConfig::default`].
    pub fn new() -> Self {
        Self::default()
    }
    /// Sets the penalty parameter rho.
    pub fn with_rho(self, rho: f32) -> Self {
        Self { rho, ..self }
    }
    /// Sets the iteration cap.
    pub fn with_max_iterations(self, max_iterations: usize) -> Self {
        Self {
            max_iterations,
            ..self
        }
    }
    /// Sets the absolute tolerance.
    pub fn with_abs_tol(self, abs_tol: f32) -> Self {
        Self { abs_tol, ..self }
    }
    /// Sets the relative tolerance.
    pub fn with_rel_tol(self, rel_tol: f32) -> Self {
        Self { rel_tol, ..self }
    }
    /// Sets the over-relaxation factor alpha.
    pub fn with_over_relaxation(self, alpha: f32) -> Self {
        Self {
            over_relaxation: alpha,
            ..self
        }
    }
    /// Enables or disables per-iteration logging.
    pub fn with_verbose(self, verbose: bool) -> Self {
        Self { verbose, ..self }
    }
}
/// Outcome of an ADMM run.
#[derive(Debug, Clone)]
pub struct AdmmResult {
/// Final consensus iterate z.
pub solution: Array1<f32>,
/// Number of sweeps actually performed.
pub iterations: usize,
/// True if the residual stopping rule fired before the iteration cap.
pub converged: bool,
/// Final primal residual (RMS over agents).
pub primal_residual: f32,
/// Final dual residual (rho times the consensus change).
pub dual_residual: f32,
/// Sum of the subproblem objectives evaluated at `solution`.
pub objective: f32,
}
/// A per-agent local problem solved inside consensus ADMM.
///
/// Implementations must be `Send + Sync` because the sweep runs all local
/// solves in parallel (rayon).
pub trait AdmmSubproblem: Send + Sync {
/// Local x-update given the current consensus `z`, scaled dual `u`,
/// and penalty `rho` (implementations here solve the proximal step
/// of their local objective around `z - u`).
fn solve(&self, z: &Array1<f32>, u: &Array1<f32>, rho: f32) -> Result<Array1<f32>, AdmmError>;
/// Value of the local objective at `x` (used only for reporting).
fn objective(&self, x: &Array1<f32>) -> f32;
/// Dimension of the local decision variable.
fn dim(&self) -> usize;
}
/// Euclidean (l2) norm of a vector.
#[inline]
fn l2_norm(v: &Array1<f32>) -> f32 {
    v.fold(0.0f32, |acc, &x| acc + x * x).sqrt()
}
/// Element-wise soft-thresholding (proximal operator of kappa * |.|):
/// shrinks each entry toward zero by `kappa`, zeroing entries in [-kappa, kappa].
fn soft_threshold(v: &Array1<f32>, kappa: f32) -> Array1<f32> {
    v.mapv(|x| match x {
        x if x > kappa => x - kappa,
        x if x < -kappa => x + kappa,
        _ => 0.0,
    })
}
/// Element-wise projection of `v` onto the box [lb, ub].
/// Panics (via `f32::clamp`) if any lb[i] > ub[i], matching `clamp` semantics.
fn box_clip(v: &Array1<f32>, lb: &Array1<f32>, ub: &Array1<f32>) -> Array1<f32> {
    let mut clipped = v.clone();
    for ((c, &lo), &hi) in clipped.iter_mut().zip(lb.iter()).zip(ub.iter()) {
        *c = (*c).clamp(lo, hi);
    }
    clipped
}
/// Solves the ridge-regularized linear system (Q + rho*I) x = b with
/// Gauss-Seidel iteration, starting from x = 0.
///
/// Runs at most 200 sweeps and stops early once successive iterates differ
/// by less than 1e-8 in l2 norm.
///
/// # Errors
/// - `DimensionMismatch` if `q` is not `n x n` for `n = b.len()`.
/// - `NumericalError` if a diagonal entry of `Q + rho*I` is near zero, or
///   the iteration produces a non-finite value.
fn gauss_seidel_solve(
    q: &Array2<f32>,
    rho: f32,
    b: &Array1<f32>,
) -> Result<Array1<f32>, AdmmError> {
    let n = b.len();
    if q.nrows() != n || q.ncols() != n {
        // Report whichever dimension actually disagrees with n (previously
        // nrows was always reported, even when only ncols was wrong).
        let got = if q.nrows() != n { q.nrows() } else { q.ncols() };
        return Err(AdmmError::DimensionMismatch { expected: n, got });
    }
    let mut x = Array1::<f32>::zeros(n);
    let max_inner = 200usize;
    for _iter in 0..max_inner {
        let x_old = x.clone();
        for i in 0..n {
            // Diagonal of the system matrix Q + rho*I.
            let a_ii = q[[i, i]] + rho;
            if a_ii.abs() < f32::EPSILON {
                return Err(AdmmError::NumericalError(format!(
                    "Near-zero diagonal at index {i}"
                )));
            }
            // Off-diagonal row sum. rho contributes only to the diagonal,
            // so inside `j != i` the matrix entry is just q[[i, j]] (the
            // previous `+ if i == j { rho } else { 0.0 }` was dead code).
            let mut sum = 0.0f32;
            for j in 0..n {
                if j != i {
                    sum += q[[i, j]] * x[j];
                }
            }
            x[i] = (b[i] - sum) / a_ii;
        }
        // Early exit once a full sweep is (numerically) a fixed point.
        let diff: f32 = x
            .iter()
            .zip(x_old.iter())
            .map(|(a, b)| (a - b) * (a - b))
            .sum::<f32>()
            .sqrt();
        if diff < 1e-8 {
            break;
        }
    }
    // Guard against divergence (Gauss-Seidel need not converge for
    // arbitrary Q): reject non-finite results rather than return garbage.
    for &xi in x.iter() {
        if !xi.is_finite() {
            return Err(AdmmError::NumericalError(
                "Gauss-Seidel produced non-finite value".into(),
            ));
        }
    }
    Ok(x)
}
/// Quadratic subproblem 0.5 x'Qx + c'x with optional box bounds.
#[derive(Debug, Clone)]
pub struct QuadraticSubproblem {
/// Quadratic term; expected square (dim x dim).
pub q: Array2<f32>,
/// Linear term; its length defines `dim()`.
pub c: Array1<f32>,
/// Optional element-wise lower bounds.
pub lb: Option<Array1<f32>>,
/// Optional element-wise upper bounds.
pub ub: Option<Array1<f32>>,
}
impl QuadraticSubproblem {
    /// Builds an unconstrained quadratic subproblem 0.5 x'Qx + c'x.
    pub fn new(q: Array2<f32>, c: Array1<f32>) -> Self {
        Self {
            q,
            c,
            lb: None,
            ub: None,
        }
    }
    /// Attaches element-wise box bounds lb <= x <= ub (builder style).
    pub fn with_bounds(self, lb: Array1<f32>, ub: Array1<f32>) -> Self {
        Self {
            lb: Some(lb),
            ub: Some(ub),
            ..self
        }
    }
}
impl AdmmSubproblem for QuadraticSubproblem {
    /// x-update: minimizes 0.5 x'Qx + c'x + (rho/2)||x - (z - u)||^2 via
    /// Gauss-Seidel on (Q + rho*I) x = rho*(z - u) - c, then clips the
    /// result to any configured bounds.
    fn solve(&self, z: &Array1<f32>, u: &Array1<f32>, rho: f32) -> Result<Array1<f32>, AdmmError> {
        let n = self.dim();
        if z.len() != n || u.len() != n {
            return Err(AdmmError::DimensionMismatch {
                expected: n,
                got: z.len(),
            });
        }
        let rhs: Array1<f32> = (z - u).mapv(|v| rho * v) - &self.c;
        let x = gauss_seidel_solve(&self.q, rho, &rhs)?;
        // Apply bounds element-wise. (BUG FIX: the one-sided cases previously
        // applied the scalar lb[0]/ub[0] to every coordinate, which is wrong
        // for non-uniform bound vectors.)
        let x = match (&self.lb, &self.ub) {
            (Some(lb), Some(ub)) => box_clip(&x, lb, ub),
            (Some(lb), None) => x
                .iter()
                .zip(lb.iter())
                .map(|(&v, &lo)| v.max(lo))
                .collect(),
            (None, Some(ub)) => x
                .iter()
                .zip(ub.iter())
                .map(|(&v, &hi)| v.min(hi))
                .collect(),
            (None, None) => x,
        };
        Ok(x)
    }
    /// Evaluates 0.5 x'Qx + c'x (bounds are not reflected in the value).
    fn objective(&self, x: &Array1<f32>) -> f32 {
        let qx: Array1<f32> = self.q.dot(x);
        0.5 * x.iter().zip(qx.iter()).map(|(a, b)| a * b).sum::<f32>()
            + x.iter().zip(self.c.iter()).map(|(a, b)| a * b).sum::<f32>()
    }
    fn dim(&self) -> usize {
        self.c.len()
    }
}
/// Lasso-style subproblem: least-squares data term plus lambda * l1 penalty.
#[derive(Debug, Clone)]
pub struct LassoSubproblem {
/// Design matrix (rows = observations, cols = variables; ncols defines `dim()`).
pub a: Array2<f32>,
/// Observation vector.
pub b: Array1<f32>,
/// l1 regularization weight.
pub lambda: f32,
}
impl LassoSubproblem {
/// Bundles the problem data; no shape validation is performed here.
pub fn new(a: Array2<f32>, b: Array1<f32>, lambda: f32) -> Self {
Self { a, b, lambda }
}
}
impl AdmmSubproblem for LassoSubproblem {
    /// x-update: ridge solve of (A'A + rho*I) v = A'b + rho*(z - u),
    /// followed by soft-thresholding with kappa = lambda / rho.
    ///
    /// NOTE(perf): A'A and A'b are recomputed on every call; a caller in a
    /// hot loop may want to cache them — confirm before changing the struct.
    fn solve(&self, z: &Array1<f32>, u: &Array1<f32>, rho: f32) -> Result<Array1<f32>, AdmmError> {
        let n = self.dim();
        if z.len() != n || u.len() != n {
            return Err(AdmmError::DimensionMismatch {
                expected: n,
                got: z.len(),
            });
        }
        let at = self.a.t();
        let gram: Array2<f32> = at.dot(&self.a);
        let target: Array1<f32> = at.dot(&self.b) + (z - u).mapv(|v| rho * v);
        let ridge = gauss_seidel_solve(&gram, rho, &target)?;
        Ok(soft_threshold(&ridge, self.lambda / rho))
    }
    /// Evaluates ||Ax - b||^2 + lambda * ||x||_1.
    /// NOTE(review): there is no 0.5 factor on the quadratic term, unlike
    /// the 0.5||Av - b||^2 the solve step implicitly minimizes — confirm
    /// this convention is intended.
    fn objective(&self, x: &Array1<f32>) -> f32 {
        let residual: Array1<f32> = self.a.dot(x) - &self.b;
        let quad = residual.fold(0.0f32, |acc, &v| acc + v * v);
        let sparsity = x.fold(0.0f32, |acc, &v| acc + v.abs());
        quad + self.lambda * sparsity
    }
    fn dim(&self) -> usize {
        self.a.ncols()
    }
}
/// Subproblem that constrains the iterate to a box [lb, ub]
/// (indicator-function agent: its objective contributes 0).
#[derive(Debug, Clone)]
pub struct ProjectionSubproblem {
/// Element-wise lower bounds; length defines `dim()`.
pub lb: Array1<f32>,
/// Element-wise upper bounds.
pub ub: Array1<f32>,
}
impl ProjectionSubproblem {
/// Bundles the bounds; no validation (lb <= ub) is performed here.
pub fn new(lb: Array1<f32>, ub: Array1<f32>) -> Self {
Self { lb, ub }
}
}
impl AdmmSubproblem for ProjectionSubproblem {
    /// x-update for a box indicator: the proximal step is exact projection
    /// of z - u onto [lb, ub]; rho does not affect the result.
    fn solve(&self, z: &Array1<f32>, u: &Array1<f32>, _rho: f32) -> Result<Array1<f32>, AdmmError> {
        let n = self.dim();
        if z.len() != n || u.len() != n {
            return Err(AdmmError::DimensionMismatch {
                expected: n,
                got: z.len(),
            });
        }
        let shifted: Array1<f32> = z - u;
        Ok(box_clip(&shifted, &self.lb, &self.ub))
    }
    /// Indicator agents contribute nothing to the reported objective.
    fn objective(&self, _x: &Array1<f32>) -> f32 {
        0.0
    }
    fn dim(&self) -> usize {
        self.lb.len()
    }
}
/// Output of one sweep: (per-agent x iterates, updated duals u,
/// new consensus z, primal residual, dual residual).
type AdmmSweepOutput = (Vec<Array1<f32>>, Vec<Array1<f32>>, Array1<f32>, f32, f32);
/// Runs one full consensus-ADMM iteration:
/// 1. parallel per-agent x-updates (rayon),
/// 2. (optionally weighted) averaging of x and u,
/// 3. over-relaxed consensus update z,
/// 4. dual updates and residual computation.
///
/// `weights`, when present, must have one entry per subproblem; the caller
/// is responsible for checking its length (indexing `w[i]` panics otherwise).
fn admm_sweep(
subproblems: &[Box<dyn AdmmSubproblem>],
z: &Array1<f32>,
us: &[Array1<f32>],
rho: f32,
alpha: f32,
weights: Option<&Array1<f32>>,
) -> Result<AdmmSweepOutput, AdmmError> {
let n_agents = subproblems.len();
let dim = z.len();
use rayon::prelude::*;
// x-update: each agent solves its local proximal problem in parallel.
let x_results: Vec<Result<Array1<f32>, AdmmError>> = subproblems
.par_iter()
.enumerate()
.map(|(i, sp)| sp.solve(z, &us[i], rho))
.collect();
// Propagate the first solve failure, if any.
let mut new_xs: Vec<Array1<f32>> = Vec::with_capacity(n_agents);
for result in x_results {
new_xs.push(result?);
}
let z_old = z.clone();
// Average x and u across agents: weighted combination when weights are
// supplied, otherwise the uniform mean.
let (x_avg, u_avg) = match weights {
Some(w) => {
let mut xa = Array1::<f32>::zeros(dim);
let mut ua = Array1::<f32>::zeros(dim);
for i in 0..n_agents {
xa = xa + new_xs[i].mapv(|v| v * w[i]);
ua = ua + us[i].mapv(|v| v * w[i]);
}
(xa, ua)
}
None => {
let mut xa = Array1::<f32>::zeros(dim);
let mut ua = Array1::<f32>::zeros(dim);
for i in 0..n_agents {
xa += &new_xs[i];
ua += &us[i];
}
let inv_n = 1.0 / n_agents as f32;
(xa.mapv(|v| v * inv_n), ua.mapv(|v| v * inv_n))
}
};
// Consensus update with over-relaxation:
// z_new = alpha * x_avg + (1 - alpha) * z_old + u_avg.
let z_new: Array1<f32> = x_avg.mapv(|v| alpha * v) + z_old.mapv(|v| (1.0 - alpha) * v) + &u_avg;
// Dual updates: u_i += x_tilde_i - z_new, accumulating squared primal residual.
let mut new_us: Vec<Array1<f32>> = Vec::with_capacity(n_agents);
let mut primal_sq = 0.0f32;
for i in 0..n_agents {
// Over-relaxed local iterate for agent i.
let x_tilde: Array1<f32> =
new_xs[i].mapv(|v| alpha * v) + z_old.mapv(|v| (1.0 - alpha) * v);
let residual_i: Array1<f32> = &x_tilde - &z_new;
primal_sq += residual_i.iter().map(|&v| v * v).sum::<f32>();
let u_new: Array1<f32> = &us[i] + &residual_i;
new_us.push(u_new);
}
// Primal residual: RMS over agents. Dual residual: rho * ||z_new - z_old||.
let primal_res = (primal_sq / n_agents as f32).sqrt();
let dual_res = rho * l2_norm(&(&z_new - &z_old));
Ok((new_xs, new_us, z_new, primal_res, dual_res))
}
/// Stopping rule: both residuals below abs_tol * sqrt(n_agents * dim) + rel_tol.
///
/// NOTE(review): rel_tol is added as a flat offset instead of being scaled
/// by the iterate norms (the usual relative criterion) — confirm intended.
fn check_convergence(
    primal_res: f32,
    dual_res: f32,
    config: &AdmmConfig,
    n_agents: usize,
    dim: usize,
) -> bool {
    let scale = ((n_agents * dim) as f32).sqrt();
    // The primal and dual thresholds are identical, so compute once.
    let eps = config.abs_tol * scale + config.rel_tol;
    primal_res < eps && dual_res < eps
}
/// Global-consensus ADMM solver: all agents share one consensus vector z
/// and are averaged uniformly each sweep.
pub struct DistributedAdmm {
/// Solver parameters.
config: AdmmConfig,
/// Registered per-agent subproblems (solved in parallel each sweep).
subproblems: Vec<Box<dyn AdmmSubproblem>>,
/// Dimension of the consensus variable.
dim: usize,
}
impl DistributedAdmm {
    /// Creates a solver for a `dim`-dimensional consensus variable with no
    /// subproblems registered yet.
    pub fn new(config: AdmmConfig, dim: usize) -> Self {
        Self {
            config,
            subproblems: Vec::new(),
            dim,
        }
    }
    /// Registers a subproblem; its dimension must match the solver's.
    ///
    /// # Errors
    /// `DimensionMismatch` when `subproblem.dim() != self.dim`.
    pub fn add_subproblem(&mut self, subproblem: Box<dyn AdmmSubproblem>) -> Result<(), AdmmError> {
        let got = subproblem.dim();
        if got != self.dim {
            return Err(AdmmError::DimensionMismatch {
                expected: self.dim,
                got,
            });
        }
        self.subproblems.push(subproblem);
        Ok(())
    }
    /// Number of registered subproblems.
    pub fn num_subproblems(&self) -> usize {
        self.subproblems.len()
    }
    /// Runs ADMM from a zero initial consensus vector.
    pub fn solve(&self) -> Result<AdmmResult, AdmmError> {
        self.solve_warm(Array1::zeros(self.dim))
    }
    /// Runs ADMM starting from the supplied consensus iterate `z_init`
    /// (warm start). Dual variables always start at zero.
    ///
    /// # Errors
    /// - `NoSubproblems` when nothing has been registered.
    /// - `DimensionMismatch` when `z_init` has the wrong length.
    pub fn solve_warm(&self, z_init: Array1<f32>) -> Result<AdmmResult, AdmmError> {
        if self.subproblems.is_empty() {
            return Err(AdmmError::NoSubproblems);
        }
        if z_init.len() != self.dim {
            return Err(AdmmError::DimensionMismatch {
                expected: self.dim,
                got: z_init.len(),
            });
        }
        let n_agents = self.subproblems.len();
        let mut z = z_init;
        let mut us = vec![Array1::<f32>::zeros(self.dim); n_agents];
        let mut primal_res = f32::INFINITY;
        let mut dual_res = f32::INFINITY;
        let mut iterations = 0usize;
        let mut converged = false;
        for iter in 0..self.config.max_iterations {
            iterations = iter + 1;
            // Per-agent x iterates are not needed between sweeps; only the
            // consensus vector and duals carry state.
            let (_xs, new_us, z_new, pr, dr) = admm_sweep(
                &self.subproblems,
                &z,
                &us,
                self.config.rho,
                self.config.over_relaxation,
                None,
            )?;
            us = new_us;
            z = z_new;
            primal_res = pr;
            dual_res = dr;
            if self.config.verbose {
                tracing::debug!(iter = iterations, primal_res, dual_res, "ADMM iteration");
            }
            if check_convergence(primal_res, dual_res, &self.config, n_agents, self.dim) {
                converged = true;
                break;
            }
        }
        // Reported objective: sum of local objectives at the final consensus.
        let objective = self
            .subproblems
            .iter()
            .map(|sp| sp.objective(&z))
            .sum::<f32>();
        Ok(AdmmResult {
            solution: z,
            iterations,
            converged,
            primal_residual: primal_res,
            dual_residual: dual_res,
            objective,
        })
    }
}
/// Consensus ADMM solver with optional per-agent averaging weights.
pub struct ConsensusAdmm {
/// Solver parameters.
config: AdmmConfig,
/// Registered per-agent subproblems.
subproblems: Vec<Box<dyn AdmmSubproblem>>,
/// Dimension of the consensus variable.
dim: usize,
/// Optional averaging weights (one per agent, summing to 1);
/// `None` means uniform averaging.
weights: Option<Array1<f32>>,
}
impl ConsensusAdmm {
    /// Creates an unweighted consensus solver (agents averaged uniformly).
    pub fn new(config: AdmmConfig, dim: usize) -> Self {
        Self {
            config,
            subproblems: Vec::new(),
            dim,
            weights: None,
        }
    }
    /// Creates a weighted consensus solver; `weights` holds one averaging
    /// coefficient per agent.
    ///
    /// # Errors
    /// `NumericalError` if the weights do not sum to 1.0 (within 1e-4).
    pub fn new_weighted(
        config: AdmmConfig,
        dim: usize,
        weights: Array1<f32>,
    ) -> Result<Self, AdmmError> {
        let sum: f32 = weights.iter().sum();
        if (sum - 1.0).abs() > 1e-4 {
            return Err(AdmmError::NumericalError(format!(
                "Weights must sum to 1.0, got {sum:.6}"
            )));
        }
        Ok(Self {
            config,
            subproblems: Vec::new(),
            dim,
            weights: Some(weights),
        })
    }
    /// Registers a subproblem; its dimension must match the solver's.
    ///
    /// # Errors
    /// `DimensionMismatch` when `subproblem.dim() != self.dim`.
    pub fn add_subproblem(&mut self, subproblem: Box<dyn AdmmSubproblem>) -> Result<(), AdmmError> {
        if subproblem.dim() != self.dim {
            return Err(AdmmError::DimensionMismatch {
                expected: self.dim,
                got: subproblem.dim(),
            });
        }
        self.subproblems.push(subproblem);
        Ok(())
    }
    /// Number of registered subproblems (added for API parity with
    /// `DistributedAdmm::num_subproblems`).
    pub fn num_subproblems(&self) -> usize {
        self.subproblems.len()
    }
    /// Runs (optionally weighted) consensus ADMM from a zero start.
    ///
    /// # Errors
    /// - `NoSubproblems` when nothing has been registered.
    /// - `DimensionMismatch` when the weight vector length differs from the
    ///   number of subproblems.
    pub fn solve(&self) -> Result<AdmmResult, AdmmError> {
        if self.subproblems.is_empty() {
            return Err(AdmmError::NoSubproblems);
        }
        let n_agents = self.subproblems.len();
        // Validate weights here so admm_sweep's w[i] indexing cannot panic.
        if let Some(w) = &self.weights {
            if w.len() != n_agents {
                return Err(AdmmError::DimensionMismatch {
                    expected: n_agents,
                    got: w.len(),
                });
            }
        }
        let mut z = Array1::<f32>::zeros(self.dim);
        let mut us: Vec<Array1<f32>> = vec![Array1::zeros(self.dim); n_agents];
        let mut primal_res = f32::INFINITY;
        let mut dual_res = f32::INFINITY;
        let mut iterations = 0usize;
        let mut converged = false;
        for iter in 0..self.config.max_iterations {
            iterations = iter + 1;
            let (_xs, new_us, z_new, pr, dr) = admm_sweep(
                &self.subproblems,
                &z,
                &us,
                self.config.rho,
                self.config.over_relaxation,
                self.weights.as_ref(),
            )?;
            us = new_us;
            z = z_new;
            primal_res = pr;
            dual_res = dr;
            if self.config.verbose {
                tracing::debug!(
                    iter = iterations,
                    primal_res,
                    dual_res,
                    "ConsensusADMM iteration"
                );
            }
            if check_convergence(primal_res, dual_res, &self.config, n_agents, self.dim) {
                converged = true;
                break;
            }
        }
        // Reported objective: sum of local objectives at the final consensus.
        let objective: f32 = self.subproblems.iter().map(|sp| sp.objective(&z)).sum();
        Ok(AdmmResult {
            solution: z,
            iterations,
            converged,
            primal_residual: primal_res,
            dual_residual: dual_res,
            objective,
        })
    }
}
#[cfg(test)]
mod tests {
use super::*;
use scirs2_core::ndarray::{Array1, Array2};
// Builds a scaled identity matrix for quick SPD test systems.
fn eye(n: usize, scale: f32) -> Array2<f32> {
let mut m = Array2::<f32>::zeros((n, n));
for i in 0..n {
m[[i, i]] = scale;
}
m
}
// Sanity-checks the default configuration values.
#[test]
fn test_admm_config_default() {
let cfg = AdmmConfig::default();
assert!(cfg.rho > 0.0, "rho must be positive");
assert!(cfg.max_iterations > 0, "max_iterations must be positive");
assert!(cfg.abs_tol > 0.0, "abs_tol must be positive");
assert!(cfg.rel_tol > 0.0, "rel_tol must be positive");
assert!(
(0.0..2.0).contains(&cfg.over_relaxation),
"over_relaxation must be in (0, 2)"
);
}
// Projection subproblem clips z - u onto [lb, ub] element-wise.
#[test]
fn test_projection_subproblem() {
let lb = Array1::from_vec(vec![0.0, -1.0, 2.0]);
let ub = Array1::from_vec(vec![1.0, 1.0, 5.0]);
let sp = ProjectionSubproblem::new(lb.clone(), ub.clone());
let z = Array1::from_vec(vec![-2.0, 3.0, 10.0]);
let u = Array1::zeros(3);
let x = sp.solve(&z, &u, 1.0).expect("projection should succeed");
assert!((x[0] - 0.0).abs() < 1e-6, "should clip to lb[0]");
assert!((x[1] - 1.0).abs() < 1e-6, "should clip to ub[1]");
assert!((x[2] - 5.0).abs() < 1e-6, "should clip to ub[2]");
}
// With a large lambda the lasso solve must shrink every component.
#[test]
fn test_lasso_subproblem_soft_threshold() {
let n = 3usize;
let a = eye(n, 1.0);
let b = Array1::zeros(n);
let lambda = 10.0f32;
let sp = LassoSubproblem::new(a, b, lambda);
let z = Array1::from_vec(vec![0.5, -0.5, 0.2]);
let u = Array1::zeros(n);
let x = sp.solve(&z, &u, 1.0).expect("lasso solve");
for &xi in x.iter() {
assert!(xi.abs() < 0.6, "soft-threshold should shrink the solution");
}
}
// Unconstrained quadratic: with Q = I, c = -2, z = 2, the prox
// solution is x = (rho*z - c) / (1 + rho) = 2.
#[test]
fn test_quadratic_subproblem_unconstrained() {
let n = 2usize;
let q = eye(n, 1.0);
let c = Array1::from_vec(vec![-2.0, -2.0]);
let sp = QuadraticSubproblem::new(q, c);
let z = Array1::from_vec(vec![2.0, 2.0]);
let u = Array1::zeros(n);
let x = sp.solve(&z, &u, 1.0).expect("quadratic solve");
assert!((x[0] - 2.0).abs() < 1e-3, "x[0] ≈ 2: got {}", x[0]);
assert!((x[1] - 2.0).abs() < 1e-3, "x[1] ≈ 2: got {}", x[1]);
}
// Bounds on a quadratic subproblem must be respected by the solve.
#[test]
fn test_quadratic_subproblem_constrained() {
let n = 2usize;
let q = eye(n, 1.0);
let c = Array1::from_vec(vec![-5.0, -5.0]);
let lb = Array1::from_vec(vec![0.0, 0.0]);
let ub = Array1::from_vec(vec![1.0, 1.0]); let sp = QuadraticSubproblem::new(q, c).with_bounds(lb, ub);
let z = Array1::from_vec(vec![3.0, 3.0]);
let u = Array1::zeros(n);
let x = sp.solve(&z, &u, 1.0).expect("constrained quadratic solve");
assert!(x[0] <= 1.0 + 1e-6, "x[0] must be ≤ ub");
assert!(x[1] <= 1.0 + 1e-6, "x[1] must be ≤ ub");
assert!(x[0] >= 0.0 - 1e-6, "x[0] must be ≥ lb");
}
// Three box agents whose feasible sets share only the point (1, 1):
// the consensus solution should land near that point.
#[test]
fn test_distributed_admm_consensus() {
let dim = 2usize;
let cfg = AdmmConfig::default()
.with_rho(2.0)
.with_max_iterations(200)
.with_abs_tol(1e-4);
let mut solver = DistributedAdmm::new(cfg, dim);
solver
.add_subproblem(Box::new(ProjectionSubproblem::new(
Array1::from_vec(vec![1.0, 1.0]),
Array1::from_vec(vec![3.0, 3.0]),
)))
.expect("add agent 0");
solver
.add_subproblem(Box::new(ProjectionSubproblem::new(
Array1::from_vec(vec![0.0, 0.0]),
Array1::from_vec(vec![1.0, 1.0]),
)))
.expect("add agent 1");
solver
.add_subproblem(Box::new(ProjectionSubproblem::new(
Array1::from_vec(vec![1.0, 0.0]),
Array1::from_vec(vec![2.0, 2.0]),
)))
.expect("add agent 2");
let result = solver.solve().expect("distributed ADMM solve");
assert!((result.solution[0] - 1.0).abs() < 0.1, "x[0] ≈ 1");
assert!((result.solution[1] - 1.0).abs() < 0.1, "x[1] ≈ 1");
}
// Identical agents: residuals must at least be finite after the run.
#[test]
fn test_distributed_admm_convergence() {
let dim = 4usize;
let cfg = AdmmConfig::default()
.with_max_iterations(500)
.with_abs_tol(1e-3);
let mut solver = DistributedAdmm::new(cfg, dim);
for _ in 0..3 {
solver
.add_subproblem(Box::new(ProjectionSubproblem::new(
Array1::from_vec(vec![0.0; dim]),
Array1::from_vec(vec![1.0; dim]),
)))
.expect("add subproblem");
}
let result = solver.solve().expect("solve");
assert!(result.primal_residual.is_finite());
assert!(result.dual_residual.is_finite());
}
// An easy feasible problem with loose tolerances must set `converged`.
#[test]
fn test_distributed_admm_convergence_flag() {
let dim = 2usize;
let cfg = AdmmConfig::default()
.with_max_iterations(1000)
.with_abs_tol(1e-3)
.with_rel_tol(1e-2);
let mut solver = DistributedAdmm::new(cfg, dim);
for _ in 0..2 {
solver
.add_subproblem(Box::new(ProjectionSubproblem::new(
Array1::from_vec(vec![0.0, 0.0]),
Array1::from_vec(vec![1.0, 1.0]),
)))
.expect("add subproblem");
}
let result = solver.solve().expect("solve");
assert!(result.converged, "should have converged");
}
// Weighted consensus over two disjoint boxes: the solution must stay
// inside the hull of the agents' bounds.
#[test]
fn test_consensus_admm_weighted() {
let dim = 1usize;
let weights = Array1::from_vec(vec![0.3f32, 0.7]);
let cfg = AdmmConfig::default()
.with_max_iterations(500)
.with_abs_tol(1e-3);
let mut solver = ConsensusAdmm::new_weighted(cfg, dim, weights).expect("new_weighted");
solver
.add_subproblem(Box::new(ProjectionSubproblem::new(
Array1::from_vec(vec![0.0]),
Array1::from_vec(vec![0.4]),
)))
.expect("add agent 0");
solver
.add_subproblem(Box::new(ProjectionSubproblem::new(
Array1::from_vec(vec![0.6]),
Array1::from_vec(vec![1.0]),
)))
.expect("add agent 1");
let result = solver.solve().expect("weighted consensus solve");
assert!(result.solution[0] >= 0.0 - 1e-4);
assert!(result.solution[0] <= 1.0 + 1e-4);
}
// Warm-starting from a converged solution should not take more
// iterations than the cold start did.
#[test]
fn test_admm_warm_start() {
let dim = 3usize;
let cfg = AdmmConfig::default()
.with_rho(2.0)
.with_max_iterations(500)
.with_abs_tol(1e-5);
let make_solver = |cfg: AdmmConfig| -> DistributedAdmm {
let mut s = DistributedAdmm::new(cfg, dim);
for _ in 0..2 {
s.add_subproblem(Box::new(ProjectionSubproblem::new(
Array1::from_vec(vec![0.5, 0.5, 0.5]),
Array1::from_vec(vec![1.0, 1.0, 1.0]),
)))
.expect("add subproblem");
}
s
};
let cold = make_solver(cfg.clone()).solve().expect("cold solve");
let warm = make_solver(cfg)
.solve_warm(cold.solution.clone())
.expect("warm solve");
assert!(
warm.iterations <= cold.iterations,
"warm ({}) should not exceed cold ({})",
warm.iterations,
cold.iterations
);
}
// Solving with zero registered subproblems is an error, not a panic.
#[test]
fn test_admm_no_subproblems_error() {
let cfg = AdmmConfig::default();
let solver = DistributedAdmm::new(cfg, 3);
let result = solver.solve();
assert!(
matches!(result, Err(AdmmError::NoSubproblems)),
"expected NoSubproblems error"
);
}
// Registering a subproblem of the wrong dimension is rejected eagerly.
#[test]
fn test_admm_dimension_mismatch() {
let cfg = AdmmConfig::default();
let mut solver = DistributedAdmm::new(cfg, 3);
let result = solver.add_subproblem(Box::new(ProjectionSubproblem::new(
Array1::from_vec(vec![0.0; 5]),
Array1::from_vec(vec![1.0; 5]),
)));
assert!(
matches!(
result,
Err(AdmmError::DimensionMismatch {
expected: 3,
got: 5
})
),
"expected DimensionMismatch error"
);
}
}