byte_arithmetic/
lib.rs

use itertools::{EitherOrBoth, Itertools};
use serde::{Deserialize, Serialize};
use std::ops::BitXor;

/// Base256 Object
///
/// A `Vec<u8>` wrapper that implements a subset of basic arithmetic, namely addition,
/// subtraction, and integer multiplication.
///
/// Also implements a wrapping addition that keeps only the low `byte_length` bytes of the sum
/// (i.e. addition modulo 256^`byte_length`), for adding hashes of a fixed size, e.g. 32-byte hashes.
///
/// Multiplication by a scalar is implemented as repeated addition.
/// ```
/// use byte_arithmetic::Base256;
/// assert_eq!(
///     Base256::new(vec![1, 2, 3]) + Base256::new(vec![1, 2, 3]),
///     Base256::new(vec![2, 4, 6])
/// );
/// assert_eq!(
///     Base256::new(vec![1, 2, 3]) * 3,
///     Base256::new(vec![3, 6, 9])
/// );
/// assert_eq!(
///     Base256::new(vec![255, 255]).wrapped_add(Base256::new(vec![0, 1]), 2),
///     Base256::new(vec![0, 0])
/// );
/// assert_eq!(
///     Base256::new(vec![255, 255]).wrapped_add(Base256::new(vec![0, 1]), 3),
///     Base256::new(vec![1, 0, 0])
/// );
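/// // Subtraction follows the same byte-wise pattern and panics on underflow:
/// assert_eq!(
///     Base256::new(vec![3, 6, 9]) - Base256::new(vec![1, 2, 3]),
///     Base256::new(vec![2, 4, 6])
/// );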
/// ```
#[derive(PartialEq, Ord, PartialOrd, Eq, Debug, Clone, Hash, Serialize, Deserialize)]
pub struct Base256 {
    inner: Vec<u8>,
}

impl std::ops::Deref for Base256 {
    type Target = Vec<u8>;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl From<Base256> for Vec<u8> {
    fn from(base256: Base256) -> Self {
        base256.inner
    }
}

impl From<Vec<u8>> for Base256 {
    fn from(buffer: Vec<u8>) -> Self {
        Base256::new(buffer)
    }
}

impl Base256 {
    pub fn new(inner: Vec<u8>) -> Self {
        Base256 { inner }
    }

    pub fn empty() -> Self {
        Base256 { inner: vec![] }
    }

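    /// Multiplies by a small scalar via repeated addition (performs `value` additions).
    /// ```
    /// use byte_arithmetic::Base256;
    /// assert_eq!(Base256::new(vec![2, 10]).scalar_multiply(3), Base256::new(vec![6, 30]));
    /// ```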
    pub fn scalar_multiply(self, value: u8) -> Self {
        let mut res = Base256::new(vec![0]);
        for _ in 0..value {
            res = res + self.clone();
        }
        res
    }

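    /// Repeated `wrapped_add`: scalar multiplication that keeps only the low `byte_length` bytes.
    /// ```
    /// use byte_arithmetic::Base256;
    /// assert_eq!(
    ///     Base256::new(vec![255]).wrapped_scalar_multiply(2, 1),
    ///     Base256::new(vec![254])
    /// );
    /// ```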
    pub fn wrapped_scalar_multiply(self, value: u8, byte_length: usize) -> Self {
        let mut res = Base256::new(vec![0]);
        for _ in 0..value {
            res = res.wrapped_add(self.clone(), byte_length);
        }
        res
    }

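    /// Adds `other`, then keeps only the least significant `byte_length` bytes of the sum,
    /// dropping any carry beyond that width.
    /// ```
    /// use byte_arithmetic::Base256;
    /// assert_eq!(
    ///     Base256::new(vec![1, 255]).wrapped_add(Base256::new(vec![0, 1]), 2),
    ///     Base256::new(vec![2, 0])
    /// );
    /// ```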
    pub fn wrapped_add(self, other: Self, byte_length: usize) -> Self {
        let mut res = self + other;
        let inner_len = res.inner.len();
        if inner_len > byte_length {
            res.inner = res.inner.into_iter().skip(inner_len - byte_length).collect();
        }
        res
    }
}

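// Byte-wise XOR. Bytes are paired index-by-index from the front; if the operands differ in
// length, the unpaired trailing bytes of the longer one pass through unchanged.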
impl BitXor for Base256 {
    type Output = Base256;

    fn bitxor(self, rhs: Self) -> Self::Output {
        Base256::new(
            self.iter()
                .zip_longest(rhs.iter())
                .map(|x| match x {
                    EitherOrBoth::Both(a, b) => *a ^ *b,
                    EitherOrBoth::Left(a) => *a,
                    EitherOrBoth::Right(b) => *b,
                })
                .collect(),
        )
    }
}

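// `base * k` for a `u8` scalar delegates to `scalar_multiply`, i.e. repeated addition.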
impl std::ops::Mul<u8> for Base256 {
    type Output = Base256;

    fn mul(self, rhs: u8) -> Self::Output {
        self.scalar_multiply(rhs)
    }
}

impl std::ops::Sub for Base256 {
    type Output = Base256;

    fn sub(self, rhs: Self) -> Self::Output {
        let mut underflow = 0;
        let mut res: Vec<u8> = Vec::with_capacity(std::cmp::max(self.inner.len(), rhs.inner.len()));
        // The derived ordering compares bytes lexicographically, which matches numeric order
        // for equal-length (big-endian) operands.
        if self < rhs {
            panic!("Underflow")
        }
        // Subtract byte by byte from the least significant end, propagating the borrow.
        let mut rev_a = self.inner;
        let mut rev_b = rhs.inner;
        rev_a.reverse();
        rev_b.reverse();
        for zipped_elem in rev_a.into_iter().zip_longest(rev_b.into_iter()) {
            let (x, y): (u8, u8) = match zipped_elem {
                EitherOrBoth::Both(a, b) => (a, b),
                EitherOrBoth::Left(a) => (a, 0),
                EitherOrBoth::Right(b) => (0, b),
            };
            let (result, local_underflow) = sub_scalar_underflow(x, y, underflow);
            res.insert(0, result);
            underflow = local_underflow;
        }
        Base256 { inner: res }
    }
}

impl std::ops::Add for Base256 {
    type Output = Base256;

    fn add(self, rhs: Self) -> Self::Output {
        let mut overflow: u8 = 0;
        let mut res: Vec<u8> = Vec::with_capacity(std::cmp::max(self.inner.len(), rhs.inner.len()));
        // Add byte by byte from the least significant end, propagating the carry.
        let mut rev_a = self.inner;
        let mut rev_b = rhs.inner;
        rev_a.reverse();
        rev_b.reverse();
        for zipped_elem in rev_a.into_iter().zip_longest(rev_b.into_iter()) {
            let (x, y): (u8, u8) = match zipped_elem {
                EitherOrBoth::Both(a, b) => (a, b),
                EitherOrBoth::Left(a) => (a, 0),
                EitherOrBoth::Right(b) => (0, b),
            };
            let (result, local_overflow) = add_scalar_overflow(x, y, overflow);
            res.insert(0, result);
            overflow = local_overflow;
        }
        if overflow > 0 {
            res.insert(0, overflow);
        }
        Base256 { inner: res }
    }
}

// Subtract one byte from another with an incoming borrow, returning (result, outgoing borrow).
// [1, 5] - [0, 8] = [0, 253]
// (5, 8, 0) => (253, 1)
// (1, 0, 1) => (0, 0)
fn sub_scalar_underflow(a: u8, b: u8, underflow: u8) -> (u8, u8) {
    let mut next_underflow = 0;
    let res = match a.checked_sub(b) {
        Some(val) => match val.checked_sub(underflow) {
            Some(val_underflow) => val_underflow,
            None => {
                // Borrow 256 from the next byte: a - b - underflow + 256.
                let res = (a as u16 + 256) - b as u16 - underflow as u16;
                next_underflow += 1;
                res as u8
            }
        },
        None => {
            // Borrow 256 from the next byte: a - b - underflow + 256.
            let res = (a as u16 + 256) - b as u16 - underflow as u16;
            next_underflow += 1;
            res as u8
        }
    };
    (res, next_underflow)
}

// Add two bytes plus an incoming carry, returning (result, outgoing carry).
fn add_scalar_overflow(a: u8, b: u8, overflow: u8) -> (u8, u8) {
    let mut next_overflow = 0;
    let res = match a.checked_add(b) {
        Some(val) => match val.checked_add(overflow) {
            Some(val_overflow) => val_overflow,
            None => {
                // Carry into the next byte and keep only the low byte here.
                let res = val as u16 + overflow as u16;
                next_overflow = (res / 256) as u8;
                (res - (next_overflow as u16 * 256)) as u8
            }
        },
        None => {
            let res = a as u16 + b as u16 + overflow as u16;
            next_overflow = (res / 256) as u8;
            (res - (next_overflow as u16 * 256)) as u8
        }
    };
    (res, next_overflow)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_scalar_add() {
        assert_eq!(add_scalar_overflow(0, 1, 0), (1, 0));
        assert_eq!(add_scalar_overflow(255, 1, 0), (0, 1));
        assert_eq!(add_scalar_overflow(255, 255, 2), (0, 2));
    }

    #[test]
    fn test_scalar_sub_direct() {
        // 5 - 8 borrows 256 from the next byte: 5 + 256 - 8 = 253, borrow 1.
        assert_eq!(sub_scalar_underflow(5, 8, 0), (253, 1));
        assert_eq!(sub_scalar_underflow(1, 0, 0), (1, 0));
        assert_eq!(sub_scalar_underflow(1, 0, 1), (0, 0));
    }

    #[test]
    fn test_scalar() {
        assert_eq!(
            Base256::new(vec![0]) + Base256::new(vec![1]),
            Base256::new(vec![1])
        );
    }

    #[test]
    fn test_scalar_sub() {
        assert_eq!(
            Base256::new(vec![30]) - Base256::new(vec![25]),
            Base256::new(vec![5])
        );
    }

    #[test]
    fn test_multiple_sub() {
        assert_eq!(
            Base256::new(vec![30, 200]) - Base256::new(vec![30, 170]),
            Base256::new(vec![0, 30])
        );
    }

    #[test]
    fn test_underflow_multiple_sub() {
        // [200, 200] - [0, 255] = 51400 - 255 = 51145 = [199, 201]
        assert_eq!(
            Base256::new(vec![200, 200]) - Base256::new(vec![0, 255]),
            Base256::new(vec![199, 201])
        );
    }

    #[test]
    fn test_multiple() {
        assert_eq!(
            Base256::new(vec![1, 2, 3]) + Base256::new(vec![2, 2, 2]),
            Base256::new(vec![3, 4, 5])
        );
    }

    #[test]
    fn test_overflow_multiple() {
        assert_eq!(
            Base256::new(vec![255, 255, 255]) + Base256::new(vec![1]),
            Base256::new(vec![1, 0, 0, 0])
        );
    }

    #[test]
    fn test_scalar_mult() {
        assert_eq!(
            Base256::new(vec![1, 1, 1]).scalar_multiply(3),
            Base256::new(vec![3, 3, 3])
        );
    }

    #[test]
    fn test_scalar_wrapped_mult() {
        assert_eq!(
            Base256::new(vec![1, 1, 1]).wrapped_scalar_multiply(40, 3),
            Base256::new(vec![40, 40, 40])
        );
    }

    #[test]
    fn test_wrapped_addition() {
        assert_eq!(
            Base256::new(vec![255, 255]).wrapped_add(Base256::new(vec![0, 1]), 2),
            Base256::new(vec![0, 0])
        );

        assert_eq!(
            Base256::new(vec![0, 0]).wrapped_add(Base256::new(vec![0, 1]), 2),
            Base256::new(vec![0, 1])
        );
    }
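
    // XOR pairs corresponding bytes from the front; with equal-length operands there is
    // no pass-through tail.
    #[test]
    fn test_xor() {
        assert_eq!(
            Base256::new(vec![0b1010_1010, 0b0000_1111])
                ^ Base256::new(vec![0b0101_0101, 0b0000_1111]),
            Base256::new(vec![0b1111_1111, 0b0000_0000])
        );
    }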
}