use std::collections::HashMap;
use std::ops::Add;
use std::time::Duration;

use num::integer::{gcd, lcm};
use num::traits::Inv;
use serde::{Deserialize, Serialize};
use uom::si::rational64::Time as UOM_Time;

use super::dependencies::Origin;
use super::TypedTrait;
use crate::config::MemoryBoundMode;
use crate::hir::{ConcretePacingType, Hir, SRef, StreamAccessKind, WRef, WindowReference};
use crate::modes::{DepAnaTrait, HirMode, MemBound, MemBoundTrait};

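/// An upper bound on the number of values of a stream, or buckets of a window, that have
/// to be kept in memory.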
#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)]
pub enum MemorizationBound {
    Unbounded,
    Bounded(u32),
}

impl MemorizationBound {
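    // Default bounds used before any access is taken into account: in dynamic mode a stream
    // starts without reserved storage, in static mode it always keeps at least its current value.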
    const DYNAMIC_DEFAULT_VALUE: MemorizationBound = MemorizationBound::Bounded(0);
    const STATIC_DEFAULT_VALUE: MemorizationBound = MemorizationBound::Bounded(1);

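    /// Returns the inner bound.
    ///
    /// # Panics
    /// Panics if the bound is [`MemorizationBound::Unbounded`].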
    pub fn unwrap(self) -> u32 {
        match self {
            MemorizationBound::Bounded(b) => b,
            MemorizationBound::Unbounded => {
                unreachable!("Called `MemorizationBound::unwrap()` on an `Unbounded` value.")
            }
        }
    }

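    /// Returns `Some(bound)` for a bounded value and `None` if the bound is unbounded.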
    pub fn as_opt(self) -> Option<u32> {
        match self {
            MemorizationBound::Bounded(b) => Some(b),
            MemorizationBound::Unbounded => None,
        }
    }

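    /// Returns the default bound for the given memory-bound mode.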
    pub(crate) fn default_value(mode: MemoryBoundMode) -> MemorizationBound {
        match mode {
            MemoryBoundMode::Static => Self::STATIC_DEFAULT_VALUE,
            MemoryBoundMode::Dynamic => Self::DYNAMIC_DEFAULT_VALUE,
        }
    }
}

impl Add for MemorizationBound {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        match (self, rhs) {
            (MemorizationBound::Unbounded, MemorizationBound::Unbounded)
            | (MemorizationBound::Unbounded, MemorizationBound::Bounded(_))
            | (MemorizationBound::Bounded(_), MemorizationBound::Unbounded) => {
                MemorizationBound::Unbounded
            }
            (MemorizationBound::Bounded(lhs), MemorizationBound::Bounded(rhs)) => {
                MemorizationBound::Bounded(lhs + rhs)
            }
        }
    }
}

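// Two `Unbounded` values are incomparable, which is why the type only implements `PartialOrd`.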
impl PartialOrd for MemorizationBound {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        use std::cmp::Ordering;

        use MemorizationBound::*;
        match (self, other) {
            (Unbounded, Unbounded) => None,
            (Bounded(_), Unbounded) => Some(Ordering::Less),
            (Unbounded, Bounded(_)) => Some(Ordering::Greater),
            (Bounded(b1), Bounded(b2)) => Some(b1.cmp(b2)),
        }
    }
}

impl MemBoundTrait for MemBound {
    fn memory_bound(&self, sr: SRef) -> MemorizationBound {
        self.memory_bound_per_stream[&sr]
    }

    fn num_buckets(&self, wr: WRef) -> MemorizationBound {
        self.memory_bound_per_window[&wr]
    }

    fn bucket_size(&self, wr: WRef) -> Duration {
        assert!(matches!(wr, WindowReference::Sliding(_)));
        self.sliding_window_bucket_size[&wr]
    }
}

impl MemBound {
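    /// Computes a memory bound for every stream and every window of `spec`.
    ///
    /// Each stream starts with the default bound of `memory_bound_mode`; every dependency
    /// edge then raises the bound of the accessed stream to the largest requirement of its
    /// accessors. For windows, the number of buckets and, for sliding windows, the bucket
    /// size are recorded as well.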
    pub(crate) fn analyze<M>(spec: &Hir<M>, memory_bound_mode: MemoryBoundMode) -> MemBound
    where
        M: HirMode + DepAnaTrait + TypedTrait,
    {
        let mut memory_bound_per_stream = spec
            .all_streams()
            .map(|sr| (sr, MemorizationBound::default_value(memory_bound_mode)))
            .collect::<HashMap<SRef, MemorizationBound>>();

        let mut memory_bound_per_window = HashMap::new();
        let mut sliding_window_bucket_size = HashMap::new();

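        // Every edge of the dependency graph represents an access and carries the memory
        // the accessed stream has to provide for it; keep the maximum requirement per stream.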
        spec.graph().edge_indices().for_each(|edge_index| {
            let cur_edge_weight = spec.graph().edge_weight(edge_index).unwrap();
            let cur_edge_bound = cur_edge_weight.as_memory_bound(memory_bound_mode);
            let (_, src_node) = spec.graph().edge_endpoints(edge_index).unwrap();
            let sr = spec.graph().node_weight(src_node).unwrap();
            let cur_mem_bound = memory_bound_per_stream.get_mut(sr).unwrap();
            *cur_mem_bound = if *cur_mem_bound > cur_edge_bound {
                *cur_mem_bound
            } else {
                cur_edge_bound
            };

            if let StreamAccessKind::SlidingWindow(wr) | StreamAccessKind::DiscreteWindow(wr) =
                cur_edge_weight.kind
            {
                let num_buckets = match wr {
                    WindowReference::Sliding(_) => {
                        let (bucket_count, bucket_size) =
                            Self::calculate_num_window_buckets(spec, wr, cur_edge_weight.origin);
                        assert!(!sliding_window_bucket_size.contains_key(&wr));
                        sliding_window_bucket_size.insert(wr, bucket_size);
                        bucket_count
                    }
                    WindowReference::Discrete(_) => spec.single_discrete(wr).aggr.duration,
                    WindowReference::Instance(_) => unreachable!(),
                };
                let memory_bound = MemorizationBound::Bounded(num_buckets as u32);
                assert!(!memory_bound_per_window.contains_key(&wr));
                memory_bound_per_window.insert(wr, memory_bound);
            }
        });

        MemBound {
            memory_bound_per_stream,
            memory_bound_per_window,
            sliding_window_bucket_size,
        }
    }

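    /// Determines how many buckets a sliding window needs and how large each bucket is.
    ///
    /// For a window of duration `d` that is evaluated with period `p`, `lcm(d, p) / p`
    /// buckets of size `gcd(d, p)` suffice.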
    fn calculate_num_window_buckets<M>(
        spec: &Hir<M>,
        window: WRef,
        origin: Origin,
    ) -> (usize, Duration)
    where
        M: HirMode + TypedTrait,
    {
        let window = &spec.single_sliding(window);
        let caller_ty = spec.mode.stream_type(window.caller);

        let caller_pacing = match origin {
            Origin::Spawn => caller_ty.spawn_pacing,
            Origin::Filter(_) | Origin::Eval(_) => caller_ty.eval_pacing,
            Origin::Close => caller_ty.close_pacing,
        };

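        // The stream evaluating the window must have a fixed frequency; convert it into an
        // evaluation period in nanoseconds.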
        let caller_frequency = match caller_pacing {
            ConcretePacingType::FixedGlobalPeriodic(p)
            | ConcretePacingType::FixedLocalPeriodic(p) => p,
            p => {
                panic!(
                    "windows can only aggregate periodic streams with fixed frequency: {:?}",
                    p
                )
            }
        };
        let caller_period = UOM_Time::new::<uom::si::time::second>(
            caller_frequency.get::<uom::si::frequency::hertz>().inv(),
        )
        .get::<uom::si::time::nanosecond>()
        .to_integer();

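        // Example: a 3s window evaluated every 2s needs lcm(3s, 2s) / 2s = 3 buckets of
        // gcd(3s, 2s) = 1s each.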
        let window_duration = window.aggr.duration.as_nanos() as i64;
        let bucket_count = (lcm(window_duration, caller_period) / caller_period) as usize;
        let bucket_size = gcd(window_duration, caller_period);
        let bucket_size = Duration::from_nanos(bucket_size as u64);

        (bucket_count, bucket_size)
    }
}

#[cfg(test)]
mod dynamic_memory_bound_tests {
    use rtlola_parser::{parse, ParserConfig};

    use super::*;
    use crate::config::FrontendConfig;
    use crate::modes::BaseMode;
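    /// Parses and analyzes `spec` in dynamic mode and compares the resulting per-stream
    /// memory bounds against `ref_memory_bounds`.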
    fn check_memory_bound_for_spec(
        spec: &str,
        ref_memory_bounds: HashMap<SRef, MemorizationBound>,
    ) {
        let parser_config = ParserConfig::for_string(spec.to_string());
        let frontend_config = FrontendConfig::from(&parser_config);
        let ast = parse(&parser_config).unwrap_or_else(|e| panic!("{:?}", e));
        let hir = Hir::<BaseMode>::from_ast(ast)
            .unwrap()
            .check_types(&frontend_config)
            .unwrap()
            .analyze_dependencies(&frontend_config)
            .unwrap()
            .determine_evaluation_order(&frontend_config)
            .unwrap();
        let bounds = MemBound::analyze(&hir, MemoryBoundMode::Dynamic);
        assert_eq!(
            bounds.memory_bound_per_stream.len(),
            ref_memory_bounds.len()
        );
        bounds.memory_bound_per_stream.iter().for_each(|(sr, b)| {
            let ref_b = ref_memory_bounds.get(sr).unwrap();
            assert_eq!(b, ref_b);
        });
    }

    #[test]
    fn synchronous_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 := a";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(0)),
            (sname_to_sref["b"], MemorizationBound::Bounded(0)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn hold_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 @1Hz := a.hold().defaults(to: 0)";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(1)),
            (sname_to_sref["b"], MemorizationBound::Bounded(0)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn offset_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 := a.offset(by: -1).defaults(to: 0)";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(2)),
            (sname_to_sref["b"], MemorizationBound::Bounded(0)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn sliding_window_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 @1Hz := a.aggregate(over: 1s, using: sum)";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(0)),
            (sname_to_sref["b"], MemorizationBound::Bounded(0)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn discrete_window_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 := a.aggregate(over_discrete: 5, using: sum)";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(0)),
            (sname_to_sref["b"], MemorizationBound::Bounded(0)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn offset_lookups() {
        let spec = "input a: UInt8\noutput b: UInt8 := a.offset(by:-1).defaults(to: 0)\noutput c: UInt8 := a.offset(by:-2).defaults(to: 0)\noutput d: UInt8 := a.offset(by:-3).defaults(to: 0)\noutput e: UInt8 := a.offset(by:-4).defaults(to: 0)";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::Out(0)),
            ("c", SRef::Out(1)),
            ("d", SRef::Out(2)),
            ("e", SRef::Out(3)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(5)),
            (sname_to_sref["b"], MemorizationBound::Bounded(0)),
            (sname_to_sref["c"], MemorizationBound::Bounded(0)),
            (sname_to_sref["d"], MemorizationBound::Bounded(0)),
            (sname_to_sref["e"], MemorizationBound::Bounded(0)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn negative_loop_different_offsets() {
        let spec = "input a: Int8\noutput b: Int8 := a.offset(by: -1).defaults(to: 0) + d.offset(by:-2).defaults(to:0)\noutput c: Int8 := b.offset(by:-3).defaults(to: 0)\noutput d: Int8 := c.offset(by:-4).defaults(to: 0)";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::Out(0)),
            ("c", SRef::Out(1)),
            ("d", SRef::Out(2)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(2)),
            (sname_to_sref["b"], MemorizationBound::Bounded(4)),
            (sname_to_sref["c"], MemorizationBound::Bounded(5)),
            (sname_to_sref["d"], MemorizationBound::Bounded(3)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn parameter_loop_with_lookup_in_close() {
        let spec = "input a: Int8\ninput b: Int8\noutput c(p) spawn with a when a < b eval with p + b + g(p).hold().defaults(to: 0)\noutput d(p) spawn with b when c(4).hold().defaults(to: 0) < 10 eval with b + 5\noutput e(p) spawn with b eval @b with d(p).hold().defaults(to: 0) + 5\noutput f(p) spawn with b eval when e(p).hold().defaults(to: 0) < 6 with b + 5\noutput g(p) spawn with b close @true when f(p).hold().defaults(to: 0) < 6 eval with b + 5";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::In(1)),
            ("c", SRef::Out(0)),
            ("d", SRef::Out(1)),
            ("e", SRef::Out(2)),
            ("f", SRef::Out(3)),
            ("g", SRef::Out(4)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(0)),
            (sname_to_sref["b"], MemorizationBound::Bounded(0)),
            (sname_to_sref["c"], MemorizationBound::Bounded(1)),
            (sname_to_sref["d"], MemorizationBound::Bounded(1)),
            (sname_to_sref["e"], MemorizationBound::Bounded(1)),
            (sname_to_sref["f"], MemorizationBound::Bounded(1)),
            (sname_to_sref["g"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn parameter_nested_lookup_implicit() {
        let spec = "input a: Int8\n input b: Int8\n output c(p) spawn with a eval with p + b\noutput d := c(c(b).hold().defaults(to: 0)).hold().defaults(to: 0)";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::In(1)),
            ("c", SRef::Out(0)),
            ("d", SRef::Out(1)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(0)),
            (sname_to_sref["b"], MemorizationBound::Bounded(0)),
            (sname_to_sref["c"], MemorizationBound::Bounded(1)),
            (sname_to_sref["d"], MemorizationBound::Bounded(0)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn parameter_nested_lookup_explicit() {
        let spec = "input a: Int8\n input b: Int8\n output c(p) spawn with a eval with p + b\noutput d := c(b).hold().defaults(to: 0)\noutput e := c(d).hold().defaults(to: 0)";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::In(1)),
            ("c", SRef::Out(0)),
            ("d", SRef::Out(1)),
            ("e", SRef::Out(2)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(0)),
            (sname_to_sref["b"], MemorizationBound::Bounded(0)),
            (sname_to_sref["c"], MemorizationBound::Bounded(1)),
            (sname_to_sref["d"], MemorizationBound::Bounded(0)),
            (sname_to_sref["e"], MemorizationBound::Bounded(0)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }
}

#[cfg(test)]
mod static_memory_bound_tests {
    use rtlola_parser::{parse, ParserConfig};

    use super::*;
    use crate::config::FrontendConfig;
    use crate::modes::BaseMode;

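    /// Runs the frontend pipeline on `spec` and computes its memory bounds in static mode.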
    fn calculate_memory_bound(spec: &str) -> MemBound {
        let parser_config = ParserConfig::for_string(spec.to_string());
        let frontend_config = FrontendConfig::from(&parser_config);
        let ast = parse(&parser_config).unwrap_or_else(|e| panic!("{:?}", e));
        let hir = Hir::<BaseMode>::from_ast(ast)
            .unwrap()
            .check_types(&frontend_config)
            .unwrap()
            .analyze_dependencies(&frontend_config)
            .unwrap()
            .determine_evaluation_order(&frontend_config)
            .unwrap();
        MemBound::analyze(&hir, MemoryBoundMode::Static)
    }

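    /// Compares the per-stream bounds computed for `spec` in static mode against
    /// `ref_memory_bounds`.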
    fn check_memory_bound_for_spec(
        spec: &str,
        ref_memory_bounds: HashMap<SRef, MemorizationBound>,
    ) {
        let bounds = calculate_memory_bound(spec);
        assert_eq!(
            bounds.memory_bound_per_stream.len(),
            ref_memory_bounds.len()
        );
        bounds.memory_bound_per_stream.iter().for_each(|(sr, b)| {
            let ref_b = ref_memory_bounds.get(sr).unwrap();
            assert_eq!(b, ref_b);
        });
    }

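    /// Compares the per-window bucket counts and sliding-window bucket sizes computed for
    /// `spec` against `ref_memory_bounds`.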
    fn check_memory_bound_for_windows(
        spec: &str,
        ref_memory_bounds: HashMap<WRef, (MemorizationBound, Duration)>,
    ) {
        let bounds = calculate_memory_bound(spec);
        assert_eq!(
            bounds.memory_bound_per_window.len(),
            ref_memory_bounds.len()
        );
        bounds.memory_bound_per_window.iter().for_each(|(wr, b)| {
            let ref_b = ref_memory_bounds.get(wr).unwrap().0.unwrap();
            assert_eq!(b.unwrap(), ref_b, "{}", wr);
        });
        bounds
            .sliding_window_bucket_size
            .iter()
            .for_each(|(wr, b)| {
                let ref_b = ref_memory_bounds.get(wr).unwrap().1;
                assert_eq!(*b, ref_b, "{}", wr);
            })
    }

    #[test]
    fn synchronous_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 := a";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(1)),
            (sname_to_sref["b"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn hold_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 @1Hz := a.hold().defaults(to: 0)";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(1)),
            (sname_to_sref["b"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn offset_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 := a.offset(by: -1).defaults(to: 0)";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(2)),
            (sname_to_sref["b"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn discrete_window_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 := a.aggregate(over_discrete: 5, using: sum)";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(1)),
            (sname_to_sref["b"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn sliding_window_lookup() {
        let spec = "input a: UInt8\noutput b: UInt8 @1Hz := a.aggregate(over: 1s, using: sum)";
        let sname_to_sref = vec![("a", SRef::In(0)), ("b", SRef::Out(0))]
            .into_iter()
            .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(1)),
            (sname_to_sref["b"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn offset_lookups() {
        let spec = "input a: UInt8\noutput b: UInt8 := a.offset(by:-1).defaults(to: 0)\noutput c: UInt8 := a.offset(by:-2).defaults(to: 0)\noutput d: UInt8 := a.offset(by:-3).defaults(to: 0)\noutput e: UInt8 := a.offset(by:-4).defaults(to: 0)";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::Out(0)),
            ("c", SRef::Out(1)),
            ("d", SRef::Out(2)),
            ("e", SRef::Out(3)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(5)),
            (sname_to_sref["b"], MemorizationBound::Bounded(1)),
            (sname_to_sref["c"], MemorizationBound::Bounded(1)),
            (sname_to_sref["d"], MemorizationBound::Bounded(1)),
            (sname_to_sref["e"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn negative_loop_different_offsets() {
        let spec = "input a: Int8\noutput b: Int8 := a.offset(by: -1).defaults(to: 0) + d.offset(by:-2).defaults(to:0)\noutput c: Int8 := b.offset(by:-3).defaults(to: 0)\noutput d: Int8 := c.offset(by:-4).defaults(to: 0)";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::Out(0)),
            ("c", SRef::Out(1)),
            ("d", SRef::Out(2)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(2)),
            (sname_to_sref["b"], MemorizationBound::Bounded(4)),
            (sname_to_sref["c"], MemorizationBound::Bounded(5)),
            (sname_to_sref["d"], MemorizationBound::Bounded(3)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn parameter_loop_with_lookup_in_close() {
        let spec = "input a: Int8\ninput b: Int8\noutput c(p) spawn with a when a < b eval with p + b + g(p).hold().defaults(to: 0)\noutput d(p) spawn with b when c(4).hold().defaults(to: 0) < 10 eval with b + 5\noutput e(p) spawn with b eval @b with d(p).hold().defaults(to: 0) + 5\noutput f(p) spawn with b eval when e(p).hold().defaults(to: 0) < 6 with b + 5\noutput g(p) spawn with b close @true when f(p).hold().defaults(to: 0) < 6 eval with b + 5";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::In(1)),
            ("c", SRef::Out(0)),
            ("d", SRef::Out(1)),
            ("e", SRef::Out(2)),
            ("f", SRef::Out(3)),
            ("g", SRef::Out(4)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(1)),
            (sname_to_sref["b"], MemorizationBound::Bounded(1)),
            (sname_to_sref["c"], MemorizationBound::Bounded(1)),
            (sname_to_sref["d"], MemorizationBound::Bounded(1)),
            (sname_to_sref["e"], MemorizationBound::Bounded(1)),
            (sname_to_sref["f"], MemorizationBound::Bounded(1)),
            (sname_to_sref["g"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn parameter_nested_lookup_implicit() {
        let spec = "input a: Int8\n input b: Int8\n output c(p) spawn with a eval with p + b\noutput d := c(c(b).hold().defaults(to: 0)).hold().defaults(to: 0)";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::In(1)),
            ("c", SRef::Out(0)),
            ("d", SRef::Out(1)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(1)),
            (sname_to_sref["b"], MemorizationBound::Bounded(1)),
            (sname_to_sref["c"], MemorizationBound::Bounded(1)),
            (sname_to_sref["d"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

    #[test]
    fn parameter_nested_lookup_explicit() {
        let spec = "input a: Int8\n input b: Int8\n output c(p) spawn with a eval with p + b\noutput d := c(b).hold().defaults(to: 0)\noutput e := c(d).hold().defaults(to: 0)";
        let sname_to_sref = vec![
            ("a", SRef::In(0)),
            ("b", SRef::In(1)),
            ("c", SRef::Out(0)),
            ("d", SRef::Out(1)),
            ("e", SRef::Out(2)),
        ]
        .into_iter()
        .collect::<HashMap<&str, SRef>>();
        let memory_bounds = vec![
            (sname_to_sref["a"], MemorizationBound::Bounded(1)),
            (sname_to_sref["b"], MemorizationBound::Bounded(1)),
            (sname_to_sref["c"], MemorizationBound::Bounded(1)),
            (sname_to_sref["d"], MemorizationBound::Bounded(1)),
            (sname_to_sref["e"], MemorizationBound::Bounded(1)),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_spec(spec, memory_bounds)
    }

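    // Expected bucket counts follow from `calculate_num_window_buckets`: a window over
    // duration `d` evaluated every `p` needs `lcm(d, p) / p` buckets of size `gcd(d, p)`,
    // e.g. `over: 3s` at `@2s` needs 3 buckets of 1s, and `over: 2s` at `@4s` needs 1 bucket of 2s.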
    #[test]
    fn sliding_window_memory_bound() {
        let spec = "input a : UInt64\noutput b@1s := a.aggregate(over: 3s, using: sum)\noutput c@4s := a.aggregate(over: 2s, using: sum)\noutput d@2s := a.aggregate(over: 3s, using: sum)\noutput e@3s := a.aggregate(over: 2s, using: sum)";
        let ref_memory_bounds = vec![
            (
                WRef::Sliding(0),
                (MemorizationBound::Bounded(3), Duration::from_secs(1)),
            ),
            (
                WRef::Sliding(1),
                (MemorizationBound::Bounded(1), Duration::from_secs(2)),
            ),
            (
                WRef::Sliding(2),
                (MemorizationBound::Bounded(3), Duration::from_secs(1)),
            ),
            (
                WRef::Sliding(3),
                (MemorizationBound::Bounded(2), Duration::from_secs(1)),
            ),
        ]
        .into_iter()
        .collect();
        check_memory_bound_for_windows(spec, ref_memory_bounds)
    }
}