1use std::{cmp::Ordering, collections::HashMap};
2
3use log::{error, warn};
4
/// Abstraction over a fragment-reassembly engine.
///
/// Implementations accumulate fragments per datagram `id` and report, via
/// [`Fragment`], whether a complete payload could be reassembled yet.
pub trait DefragEngine: Send + Sync {
    /// Feed one fragment into the engine.
    ///
    /// * `id` — reassembly key identifying the datagram this fragment belongs to
    /// * `offset` — byte offset of `frag` within the reassembled payload
    /// * `more_fragments` — true when further fragments are expected
    ///   (presumably the IP "MF" flag — confirm against the caller)
    /// * `frag` — payload bytes of this fragment
    ///
    /// Returns a [`Fragment`] describing the current reassembly state.
    fn update<'a>(
        &mut self,
        id: u32,
        offset: usize,
        more_fragments: bool,
        frag: &'a [u8],
    ) -> Fragment<'a>;
}
17
/// Result of feeding one fragment into a [`DefragEngine`].
pub enum Fragment<'a> {
    /// The input was not fragmented at all; the original slice is handed back.
    NoFrag(&'a [u8]),
    /// Reassembly finished; carries the full defragmented payload.
    Complete(Vec<u8>),
    /// More fragments are needed before the payload is complete.
    Incomplete,
    /// Reassembly failed (e.g. more than one hole was detected).
    Error,
}
28
/// Per-datagram reassembly state.
struct DefragData {
    // Contiguous bytes reassembled so far, starting at payload offset 0.
    buffer: Vec<u8>,
    // Length of the contiguous prefix currently held in `buffer`.
    last_complete_offset: usize,
    // Bytes parked past a single hole, waiting for the gap to be filled.
    next_data: Vec<u8>,
    // Offset at which `next_data` starts, when a hole is open.
    next_offset: Option<usize>,
    // Whether the parked data ends the datagram (no more fragments after it).
    next_is_complete: bool,
}

impl DefragData {
    /// Build fresh reassembly state from the first fragment seen for a datagram.
    ///
    /// A fragment at offset 0 seeds the contiguous buffer directly; any other
    /// offset opens a hole and parks the bytes in `next_data` until the gap
    /// before them is filled.
    fn new(data: &[u8], offset: usize, complete: bool) -> DefragData {
        let in_order = offset == 0;
        DefragData {
            buffer: if in_order { data.to_vec() } else { Vec::new() },
            last_complete_offset: if in_order { data.len() } else { 0 },
            next_data: if in_order { Vec::new() } else { data.to_vec() },
            next_offset: if in_order { None } else { Some(offset) },
            next_is_complete: complete,
        }
    }
}
58
59pub struct IPDefragEngine {
60 ip_fragments: HashMap<u32, DefragData>,
63}
64
65impl IPDefragEngine {
66 pub fn new() -> IPDefragEngine {
67 IPDefragEngine {
68 ip_fragments: HashMap::new(),
69 }
70 }
71}
72
73impl DefragEngine for IPDefragEngine {
74 fn update<'a>(
75 &mut self,
76 id: u32,
77 frag_offset: usize,
78 more_fragments: bool,
79 frag: &'a [u8],
80 ) -> Fragment<'a> {
81 if !more_fragments && frag_offset == 0 {
83 return Fragment::NoFrag(frag);
84 }
85 if let Some(f) = self.ip_fragments.get_mut(&id) {
87 if frag_offset > f.last_complete_offset {
88 if let Some(_next_offset) = f.next_offset {
89 warn!("defrag: maybe second hole");
90 return Fragment::Error;
93 } else {
94 warn!(
96 "defrag: hole detected key={} len={} next offset={}",
97 id,
98 frag.len(),
99 frag_offset
100 );
101 f.next_data.extend_from_slice(frag);
102 f.next_offset = Some(frag_offset);
103 f.next_is_complete = !more_fragments;
104 return Fragment::Incomplete;
105 }
106 } else {
107 warn!(
108 "defrag: adding data to buffer key={} len={} offset={}",
109 id,
110 frag.len(),
111 frag_offset
112 );
113 if frag_offset < f.buffer.len() {
114 warn!(
115 "defrag: overlapping data frag_offset {}, last_complete_offset={}",
116 frag_offset, f.last_complete_offset
117 );
118 f.buffer.truncate(frag_offset);
119 }
120 f.buffer.extend_from_slice(frag);
121 if let Some(next_offset) = f.next_offset {
122 let new_buffer_len = f.buffer.len();
125 if new_buffer_len >= next_offset {
126 warn!("defrag: checking hole");
127 if new_buffer_len > next_offset + f.next_data.len() {
128 warn!("defrag: hole completely covered by overlapping data");
129 f.next_data.clear();
130 f.next_offset = None;
131 f.next_is_complete = false;
132 } else {
133 match new_buffer_len.cmp(&next_offset) {
135 Ordering::Greater => {
136 warn!("defrag: hole partially covered");
137 let bytes_to_skip = next_offset - new_buffer_len;
139 f.buffer.extend_from_slice(&f.next_data[bytes_to_skip..]);
140 f.last_complete_offset = f.buffer.len();
141 f.next_data.clear();
142 f.next_offset = None;
144 }
145 Ordering::Equal => {
146 warn!("defrag: hole exactly covered (probably a reorder)");
147 f.buffer.append(&mut f.next_data);
148 f.next_offset = None;
149 f.last_complete_offset = f.buffer.len();
150 }
151 Ordering::Less => {
152 }
154 }
155 }
156 }
157 } else {
158 f.last_complete_offset = f.buffer.len();
159 }
160 }
161 if (!more_fragments || f.next_is_complete) && f.next_offset.is_none() {
163 match self.ip_fragments.remove(&id) {
164 Some(f) => {
165 warn!("defrag: done for id {}", id);
166 return Fragment::Complete(f.buffer);
167 }
168 None => {
169 error!("defrag: could not remove entry (while we know it exists!)");
170 return Fragment::Error;
171 }
172 }
173 }
174 Fragment::Incomplete
175 } else {
176 warn!(
178 "defrag: inserting buffer key={} len={} offset={}",
179 id,
180 frag.len(),
181 frag_offset
182 );
183 self.ip_fragments
184 .insert(id, DefragData::new(frag, frag_offset, !more_fragments));
185 Fragment::Incomplete
186 }
187 }
188}