//! Module for the Task definition
use crate::curve::{AggregateExt, Curve};
use crate::iterators::curve::CurveDeltaIterator;
use crate::iterators::task::TaskDemandIterator;
use crate::iterators::{CurveIterator, ReclassifyIterator};
use crate::server::ActualServerExecution;
use crate::system::System;
use crate::task::curve_types::{
ActualTaskExecution, AvailableTaskExecution, HigherPriorityTaskDemand,
};
use crate::time::{TimeUnit, UnitNumber};
use crate::window::WindowEnd;
use crate::window::{Demand, Window};
pub mod curve_types {
//! Module for `CurveType`s of a Task
/// Marker Type for Curves representing a Task's Demand
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
pub struct TaskDemand;
/// Marker Type for Curves representing aggregated higher priority task demand
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
pub struct HigherPriorityTaskDemand;
/// Marker type for Curves representing the available execution for a task
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
pub struct AvailableTaskExecution;
/// Marker type for Curves representing the actual execution for a task
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
pub struct ActualTaskExecution;
}
/// The Task type, based on the modeling described in the second paragraph
/// of Chapter 3. of the paper
#[derive(Debug, Clone, Copy)]
pub struct Task {
/// The offset of the task, `O_i` in the paper
pub offset: TimeUnit,
/// The demand induced by the task,
/// called the worst-case execution time (WCET), `C_i` in the paper
pub demand: TimeUnit,
/// The interval of the task, called the period, `P_i` in the paper
pub interval: TimeUnit,
}
impl Task {
/// Create a new Task with the corresponding parameters
///
/// # Panics
/// If the interval is shorter than the demand
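///
/// # Example
///
/// An illustrative sketch, assuming an integer type that converts into [`TimeUnit`]:
///
/// ```ignore
/// // a task with WCET 1, period 4 and offset 0
/// let task = Task::new(1, 4, 0);
/// ```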
#[must_use]
pub fn new<I: Into<TimeUnit>>(demand: I, interval: I, offset: I) -> Self {
let demand = demand.into();
let interval = interval.into();
if interval < demand {
panic!("Task can't have an interval shorter than its demand!")
}
Task {
offset: offset.into(),
demand,
interval,
}
}
/// Calculate the higher priority task demand for the task with priority `index`, as defined in Definition 14. (1) of the paper,
/// for a set of tasks indexed by their priority (lower index <=> higher priority)
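///
/// A usage sketch, assuming `tasks` is a slice of [`Task`]s ordered by priority:
///
/// ```ignore
/// // aggregated demand of the tasks with higher priority than tasks[2]
/// let hptd = Task::higher_priority_task_demand_iter(&tasks, 2);
/// ```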
#[must_use]
pub fn higher_priority_task_demand_iter(
tasks: &[Self],
index: usize,
) -> impl CurveIterator<CurveKind = HigherPriorityTaskDemand> + Clone + '_ {
tasks[..index]
.iter()
.map(|task| task.into_iter())
.aggregate::<ReclassifyIterator<_, _>>()
}
/// Calculate the available execution curve for a task,
/// given its server's constrained actual execution curve and the aggregated demand of the tasks with higher priority on that server.
///
/// Based on Definition 14. (2) of the paper
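///
/// A usage sketch mirroring [`Task::original_actual_execution_curve_iter`]:
///
/// ```ignore
/// let asec = system.original_actual_execution_curve_iter(server_index);
/// let hptd = Task::higher_priority_task_demand_iter(
///     system.as_servers()[server_index].as_tasks(),
///     task_index,
/// );
/// let available = Task::available_execution_curve_impl(asec, hptd);
/// ```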
#[must_use]
pub fn available_execution_curve_impl<'a, HPTD, ASEC>(
constrained_server_execution_curve: ASEC,
higher_priority_task_demand: HPTD,
) -> impl CurveIterator<CurveKind = AvailableTaskExecution> + Clone + 'a
where
HPTD: CurveIterator<CurveKind = HigherPriorityTaskDemand> + Clone + 'a,
ASEC: CurveIterator<CurveKind = ActualServerExecution> + Clone + 'a,
{
let delta = CurveDeltaIterator::new(
constrained_server_execution_curve,
higher_priority_task_demand,
);
delta
.remaining_supply()
.reclassify::<AvailableTaskExecution>()
}
/// Calculate the actual execution Curve for the Task with priority `task_index` of the Server with priority `server_index`,
/// using the original actual server execution curve of the server.
///
/// Based on Definition 14. (3) of the paper
#[must_use]
pub fn original_actual_execution_curve_iter<'a>(
system: &'a System,
server_index: usize,
task_index: usize,
) -> impl CurveIterator<CurveKind = ActualTaskExecution> + Clone + 'a {
let asec = system.original_actual_execution_curve_iter(server_index);
let hptd = Task::higher_priority_task_demand_iter(
system.as_servers()[server_index].as_tasks(),
task_index,
);
let available_execution_curve = Task::available_execution_curve_impl(asec, hptd);
let task_demand_curve =
system.as_servers()[server_index].as_tasks()[task_index].into_iter();
CurveDeltaIterator::new(available_execution_curve, task_demand_curve)
.overlap::<ActualTaskExecution>()
}
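/// Same as [`Task::original_actual_execution_curve_iter`],
/// but based on the fixed actual server execution curve of the server
///
/// Based on Definition 14. (3) of the paper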
#[must_use]
pub fn fixed_actual_execution_curve_iter<'a>(
system: &'a System,
server_index: usize,
task_index: usize,
) -> impl CurveIterator<CurveKind = ActualTaskExecution> + Clone + 'a {
let asec = system.fixed_actual_execution_curve_iter(server_index);
let hptd = Task::higher_priority_task_demand_iter(
system.as_servers()[server_index].as_tasks(),
task_index,
);
let available_execution_curve = Task::available_execution_curve_impl(asec, hptd);
let task_demand_curve =
system.as_servers()[server_index].as_tasks()[task_index].into_iter();
CurveDeltaIterator::new(available_execution_curve, task_demand_curve)
.overlap::<ActualTaskExecution>()
}
/// Calculate the WCRT for the task with priority `task_index` of the Server with priority `server_index`
///
/// See Definition 15. of the paper for reference
///
/// Takes the system of servers that the task under analysis is part of,
/// the priority/index of the server the task belongs to,
/// the task's priority/index within that server,
/// and the time up to which arriving jobs shall be considered for the analysis
///
/// # Panics
/// When sanity checks fail
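///
/// # Example
///
/// A minimal usage sketch, assuming `system` has already been constructed and `swh` is the system wide hyper period up to which arrivals are considered:
///
/// ```ignore
/// let wcrt = Task::original_worst_case_response_time(&system, server_index, task_index, swh);
/// ```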
#[must_use]
pub fn original_worst_case_response_time(
system: &System,
server_index: usize,
task_index: usize,
arrival_before: TimeUnit,
) -> TimeUnit {
let swh = arrival_before;
let actual_execution_time_iter =
Task::original_actual_execution_curve_iter(system, server_index, task_index);
let task = &system.as_servers()[server_index].as_tasks()[task_index];
// index of the last job that arrives before the swh
let last_job = (swh - task.offset - TimeUnit::ONE) / task.interval;
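// e.g. with offset = 0, interval = 4 and swh = 10:
// last_job = (10 - 0 - 1) / 4 = 2, i.e. the jobs arriving at 0, 4 and 8 are considered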
let total_execution = (last_job + 1) * task.demand;
let mut provided = WindowEnd::Finite(TimeUnit::ZERO);
let actual_execution_time: Curve<_> = actual_execution_time_iter
.take_while_curve(|window| {
let take = provided < total_execution;
provided += window.length();
take
})
.collect_curve();
// sanity check that last_job arrival is before swh
assert!(
task.job_arrival(last_job) < swh,
"Last job should arrive before the system wide hyper period"
);
// sanity check that job after last_job is not before swh
assert!(
swh <= task.job_arrival(last_job + 1),
"The job after the last job would arrive after or at the system wide hyper period"
);
assert!(
WindowEnd::Finite((last_job + 1) * task.demand) <= actual_execution_time.capacity(),
"There should be enough capacity for the last job"
);
(0..=last_job)
.map(|job| {
let arrival = task.job_arrival(job);
let t = (job + 1) * task.demand;
Task::time_to_provide(&actual_execution_time, t) - arrival
})
.max()
.unwrap_or(TimeUnit::ZERO)
}
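/// Same as [`Task::original_worst_case_response_time`],
/// but using [`Task::fixed_actual_execution_curve_iter`]
///
/// See Definition 15. of the paper for reference
///
/// # Panics
/// When sanity checks fail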
#[must_use]
pub fn fixed_worst_case_response_time(
system: &System,
server_index: usize,
task_index: usize,
arrival_before: TimeUnit,
) -> TimeUnit {
let swh = arrival_before;
let actual_execution_time_iter =
Task::fixed_actual_execution_curve_iter(system, server_index, task_index);
let task = &system.as_servers()[server_index].as_tasks()[task_index];
// index of the last job that arrives before the swh
let last_job = (swh - task.offset - TimeUnit::ONE) / task.interval;
let total_execution = (last_job + 1) * task.demand;
let mut provided = WindowEnd::Finite(TimeUnit::ZERO);
let actual_execution_time: Curve<_> = actual_execution_time_iter
.take_while_curve(|window| {
let take = provided < total_execution;
provided += window.length();
take
})
.collect_curve();
// sanity check that last_job arrival is before swh
assert!(
task.job_arrival(last_job) < swh,
"Last job should arrive before the system wide hyper period"
);
// sanity check that job after last_job is not before swh
assert!(
swh <= task.job_arrival(last_job + 1),
"The job after the last job would arrive after or at the system wide hyper period"
);
assert!(
WindowEnd::Finite((last_job + 1) * task.demand) <= actual_execution_time.capacity(),
"There should be enough capacity for the last job"
);
(0..=last_job)
.map(|job| {
let arrival = task.job_arrival(job);
let t = (job + 1) * task.demand;
Task::time_to_provide(&actual_execution_time, t) - arrival
})
.max()
.unwrap_or(TimeUnit::ZERO)
}
/// Calculate the time until the execution curve has served `t` units of demand,
/// implementing Algorithm 5. from the paper
///
/// # Panics
/// When the capacity of the curve is less than t
/// or t is [`TimeUnit::ZERO`]
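///
/// # Example
///
/// A worked sketch of the algorithm: for a curve with the windows `[0, 2)` and `[5, 8)` and `t = 3`,
/// the first window provides 2 units of demand, the remaining unit is served by the second window,
/// and the result is `5 + 1 = 6`.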
#[must_use]
pub fn time_to_provide(
actual_execution_time: &Curve<ActualTaskExecution>,
t: TimeUnit,
) -> TimeUnit {
// Note: the paper wants to find the largest index k with the sum of the windows 0..=k < t,
// but when calculating k the sum skips window 0,
// i.e. it finds the largest index k with the sum of the windows 1..=k < t
// this appears to be a mix-up between 0-based and 1-based indexing and
// is therefore not replicated in this implementation
// (1)
// the index here is exclusive, i.e. k+1, as opposed to the inclusive index used in the paper
let (index, sum) = actual_execution_time
.as_windows()
.iter()
.enumerate()
.scan(TimeUnit::ZERO, |acc, (index, window)| {
match window.length() {
WindowEnd::Finite(length) => {
*acc += length;
(*acc < t).then(|| (index + 1, *acc))
}
WindowEnd::Infinite => None,
}
})
.last()
.unwrap_or((0, TimeUnit::ZERO));
// (2)
let b = t - sum;
// this should hold as sum is the largest sum of head window lengths less than t
debug_assert!(
b > TimeUnit::ZERO,
"There should be remaining demand, but b = {:?}",
b
);
actual_execution_time.as_windows()[index].start + b
}
/// Calculate the arrival time of the job with the given `job_index`
///
/// Note: the paper uses 1-based indexing for jobs while this implementation uses 0-based indexing,
/// so this calculates the arrival of the `job_index + 1`-th job in the paper's terms
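///
/// # Example
///
/// An illustrative sketch, assuming the integer conversions into [`TimeUnit`] used by [`Task::new`] are available:
///
/// ```ignore
/// let task = Task::new(1, 4, 2); // demand 1, interval 4, offset 2
/// assert_eq!(task.job_arrival(0), TimeUnit::from(2)); // 2 + 0 * 4
/// assert_eq!(task.job_arrival(3), TimeUnit::from(14)); // 2 + 3 * 4
/// ```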
#[must_use]
pub fn job_arrival(&self, job_index: UnitNumber) -> TimeUnit {
self.offset + job_index * self.interval
}
}
impl IntoIterator for Task {
type Item = Window<Demand>;
type IntoIter = TaskDemandIterator;
/// Generate the Demand Curve for the Task
///
/// Based on Definition 9. and 10. of the paper
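///
/// A usage sketch; the demand curve of a periodic task has no natural end, so only a finite prefix is taken here:
///
/// ```ignore
/// let task = Task::new(1, 4, 0);
/// let first_three: Vec<Window<Demand>> = task.into_iter().take(3).collect();
/// ```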
fn into_iter(self) -> Self::IntoIter {
TaskDemandIterator::new(self)
}
}