rtfm_syntax/
analyze.rs

1//! RTFM application analysis
2
3use core::cmp;
4use std::collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap};
5
6use indexmap::IndexMap;
7use syn::{Ident, Type};
8
9use crate::{ast::App, Core, Set};
10
/// Runs the whole-application analysis over the parsed `App` and returns the
/// aggregated `Analysis`.
///
/// The individual passes are labeled (a) through (o) in the comments below.
/// Later passes consume the results of earlier ones -- e.g. the spawn barriers
/// computed in (i) are refined by the `schedule` pass (n) -- so the statement
/// order here is significant.
pub(crate) fn app(app: &App) -> Analysis {
    // a. Which core initializes which resources
    let mut late_resources = LateResources::new();
    if !app.late_resources.is_empty() {
        // resources not claimed by any `late` argument ...
        let mut resources = app.late_resources.keys().cloned().collect::<BTreeSet<_>>();
        // ... are initialized by the single `init` that has no `late` argument
        let mut rest = None;
        for (&core, init) in &app.inits {
            if init.args.late.is_empty() {
                // this was checked in the `check` pass
                debug_assert!(rest.is_none());

                rest = Some(core);
            } else {
                let late_resources = late_resources.entry(core).or_default();

                for name in &init.args.late {
                    late_resources.insert(name.clone());
                    resources.remove(name);
                }
            }
        }

        if let Some(rest) = rest {
            late_resources.insert(rest, resources);
        }
    }

    // c. Ceiling analysis of Exclusive resources
    // d. Sync-ness of Access::Shared resources
    // e. Location of resources
    // f. Cross initialization needs a post-initialization synchronization barrier
    let mut initialization_barriers = InitializationBarriers::new();
    // resources explicitly declared `shared` start out as `Location::Shared`
    // with no known accessor cores; the cores are filled in by pass (e)
    let mut locations = app
        .late_resources
        .iter()
        .chain(app.resources.iter().map(|(name, res)| (name, &res.late)))
        .filter_map(|(name, lr)| {
            if lr.shared {
                Some((
                    name.clone(),
                    Location::Shared {
                        cores: BTreeSet::new(),
                    },
                ))
            } else {
                None
            }
        })
        .collect::<Locations>();
    let mut ownerships = Ownerships::new();
    // tracks the first core seen to `Shared`-access each resource (pass (d))
    let mut shared_accesses = HashMap::new();
    let mut sync_types = SyncTypes::new();
    for (core, prio, name, access) in app.resource_accesses() {
        let res = app.resource(name).expect("UNREACHABLE").0;

        // (d)
        if access.is_shared() {
            if let Some(&other_core) = shared_accesses.get(name) {
                if other_core != core {
                    // a resources accessed from different cores needs to be `Sync` regardless of
                    // priorities
                    sync_types.entry(core).or_default().insert(res.ty.clone());
                    sync_types
                        .entry(other_core)
                        .or_default()
                        .insert(res.ty.clone());
                }
            } else {
                shared_accesses.insert(name, core);
            }
        }

        // (e)
        // the first access pins the resource to that core (`Owned`); any later
        // access from a different core upgrades the location to `Shared`
        if let Some(loc) = locations.get_mut(name) {
            match loc {
                Location::Owned {
                    core: other_core, ..
                } => {
                    if core != *other_core {
                        let mut cores = BTreeSet::new();
                        cores.insert(core);
                        cores.insert(*other_core);
                        *loc = Location::Shared { cores };
                    }
                }

                Location::Shared { cores } => {
                    cores.insert(core);
                }
            }
        } else {
            locations.insert(
                name.clone(),
                Location::Owned {
                    core,
                    cross_initialized: false,
                },
            );
        }

        // (c)
        // `prio` is `None` for accesses from `init`; those are excluded from
        // the ceiling analysis
        if let Some(priority) = prio {
            if let Some(ownership) = ownerships.get_mut(name) {
                match *ownership {
                    // a second accessor at a *different* priority makes the
                    // resource `Contended`; the ceiling is the max priority
                    Ownership::Owned { priority: ceiling }
                    | Ownership::CoOwned { priority: ceiling }
                    | Ownership::Contended { ceiling }
                        if priority != ceiling =>
                    {
                        *ownership = Ownership::Contended {
                            ceiling: cmp::max(ceiling, priority),
                        };

                        if access.is_shared() {
                            sync_types.entry(core).or_default().insert(res.ty.clone());
                        }
                    }

                    // a second accessor at the *same* priority: co-ownership
                    Ownership::Owned { priority: ceil } if ceil == priority => {
                        *ownership = Ownership::CoOwned { priority };
                    }

                    _ => {}
                }
            } else {
                ownerships.insert(name.clone(), Ownership::Owned { priority });
            }
        }

        // (f) in cross-initialization the initializer core is like a sender and the user core is
        // like a receiver
        let receiver = core;
        for (&sender, resources) in &late_resources {
            if sender == receiver {
                continue;
            }

            if resources.contains(name) {
                initialization_barriers
                    .entry(receiver)
                    .or_default()
                    .insert(sender);
            }
        }
    }

    // mark `Owned` resources that are initialized by a core other than the one
    // they are located on
    for (name, loc) in &mut locations {
        if let Location::Owned {
            core,
            cross_initialized,
        } = loc
        {
            for (&initializer, resources) in &late_resources {
                if resources.contains(name) && *core != initializer {
                    *cross_initialized = true;
                }
            }
        }
    }

    // Most late resources need to be `Send`
    let mut send_types = SendTypes::new();
    let owned_by_idle = Ownership::Owned { priority: 0 };
    for (name, res) in app.late_resources.iter() {
        // cross-initialized || not owned by idle
        if locations
            .get(name)
            .map(|loc| loc.cross_initialized())
            .unwrap_or(false)
            || ownerships
                .get(name)
                .map(|ownership| *ownership != owned_by_idle)
                .unwrap_or(false)
        {
            if let Some(loc) = locations.get(name) {
                match loc {
                    Location::Owned { core, .. } => {
                        send_types.entry(*core).or_default().insert(res.ty.clone());
                    }

                    Location::Shared { cores } => cores.iter().for_each(|&core| {
                        send_types.entry(core).or_default().insert(res.ty.clone());
                    }),
                }
            }
        }
    }

    // All resources shared with `init` (ownership != None) need to be `Send`
    for name in app
        .inits
        .values()
        .flat_map(|init| init.args.resources.keys())
    {
        if let Some(ownership) = ownerships.get(name) {
            if *ownership != owned_by_idle {
                if let Some(loc) = locations.get(name) {
                    match loc {
                        Location::Owned { core, .. } => {
                            send_types
                                .entry(*core)
                                .or_default()
                                .insert(app.resources[name].ty.clone());
                        }

                        Location::Shared { cores } => cores.iter().for_each(|&core| {
                            send_types
                                .entry(core)
                                .or_default()
                                .insert(app.resources[name].ty.clone());
                        }),
                    }
                }
            }
        }
    }

    // Initialize the timer queues
    let mut timer_queues = TimerQueues::new();
    for (scheduler_core, _scheduler_prio, name) in app.schedule_calls() {
        let schedulee = &app.software_tasks[name];
        let schedulee_core = schedulee.args.core;
        let schedulee_prio = schedulee.args.priority;

        // the timer queue lives on the core that *calls* `schedule`
        let tq = timer_queues.entry(scheduler_core).or_default();
        tq.tasks.insert(name.clone());

        if scheduler_core == schedulee_core {
            // the handler priority must match the priority of the highest priority schedulee that's
            // dispatched in the same core
            tq.priority = cmp::max(tq.priority, schedulee_prio);

            // the priority ceiling must be equal or greater than the handler priority
            tq.ceiling = tq.priority;
        } else {
            // when cross-scheduling the timer handler needs to run at the highest local priority
            // (one above the highest priority used by any task on the scheduler core)
            tq.priority = app
                .hardware_tasks
                .values()
                .filter_map(|task| {
                    if task.args.core == scheduler_core {
                        Some(task.args.priority)
                    } else {
                        None
                    }
                })
                .chain(app.software_tasks.values().filter_map(|task| {
                    if task.args.core == scheduler_core {
                        Some(task.args.priority)
                    } else {
                        None
                    }
                }))
                .max()
                .map(|prio| prio + 1)
                .unwrap_or(tq.priority);

            // the priority ceiling must be equal or greater than the handler priority
            tq.ceiling = tq.priority;
        }
    }

    // g. Ceiling analysis of free queues (consumer end point) -- first pass
    // h. Ceiling analysis of the channels (producer end point) -- first pass
    // i. Spawn barriers analysis
    // j. Send analysis
    let mut channels = Channels::new();
    let mut free_queues = FreeQueues::new();
    let mut spawn_barriers = SpawnBarriers::new();
    for (spawner_core, spawner_prio, name) in app.spawn_calls() {
        let spawnee = &app.software_tasks[name];
        let spawnee_core = spawnee.args.core;
        let spawnee_prio = spawnee.args.priority;

        let mut must_be_send = false;
        if spawner_core != spawnee_core {
            // (i)
            // `spawner_prio` is `None` exactly when the spawn comes from `init`
            let spawned_from_init = spawner_prio.is_none();
            spawn_barriers
                .entry(spawnee_core)
                .or_default()
                .insert(spawner_core, spawned_from_init);

            // (j) messages that cross the core boundary need to be `Send`
            must_be_send = true;
        }

        let channel = channels
            .entry(spawnee_core)
            .or_default()
            .entry(spawnee_prio)
            .or_default()
            .entry(spawner_core)
            .or_default();
        channel.tasks.insert(name.clone());

        let fq = free_queues
            .entry(name.clone())
            .or_default()
            .entry(spawner_core)
            .or_default();

        if let Some(prio) = spawner_prio {
            // (h) Spawners contend for the `channel`
            match channel.ceiling {
                None => channel.ceiling = Some(prio),
                Some(ceil) => channel.ceiling = Some(cmp::max(prio, ceil)),
            }

            // (g) Spawners contend for the free queue
            match *fq {
                None => *fq = Some(prio),
                Some(ceil) => *fq = Some(cmp::max(ceil, prio)),
            }

            // (j) core-local messages that connect tasks running at different priorities need to be
            // `Send`
            if spawner_core == spawnee_core && spawnee_prio != prio {
                must_be_send = true;
            }
        } else if spawner_core == spawnee_core {
            // (g, h) spawns from `init` are excluded from the ceiling analysis
            // (j) but spawns from `init` must be `Send`
            must_be_send = true;
        }

        if must_be_send {
            // the message types must be `Send`-able on both ends of the channel
            {
                let send_types = send_types.entry(spawner_core).or_default();

                spawnee.inputs.iter().for_each(|input| {
                    send_types.insert(input.ty.clone());
                });
            }

            let send_types = send_types.entry(spawnee_core).or_default();

            spawnee.inputs.iter().for_each(|input| {
                send_types.insert(input.ty.clone());
            });
        }
    }

    // k. Ceiling analysis of free queues (consumer end point) -- second pass
    // l. Ceiling analysis of the channels (producer end point) -- second pass
    // m. Ceiling analysis of the timer queue
    // n. Spawn barriers analysis (schedule edition)
    // o. Send analysis
    for (scheduler_core, scheduler_prio, name) in app.schedule_calls() {
        let schedulee = &app.software_tasks[name];
        let schedulee_core = schedulee.args.core;
        let schedulee_prio = schedulee.args.priority;

        let mut must_be_send = false;
        if scheduler_core != schedulee_core {
            // (n)
            match spawn_barriers
                .entry(schedulee_core)
                .or_default()
                .entry(scheduler_core)
            {
                // NOTE `schedule`s always send messages from the timer queue handler so they never
                // send messages during `init`
                Entry::Vacant(entry) => {
                    entry.insert(false);
                }

                Entry::Occupied(..) => {}
            }

            // (o) messages that cross the core boundary need to be `Send`
            must_be_send = true;
        }

        // the queue was created by the "Initialize the timer queues" pass above
        let tq = timer_queues.get_mut(&scheduler_core).expect("UNREACHABLE");

        let channel = channels
            .entry(schedulee_core)
            .or_default()
            .entry(schedulee_prio)
            .or_default()
            .entry(scheduler_core)
            .or_default();
        channel.tasks.insert(name.clone());

        let fq = free_queues
            .entry(name.clone())
            .or_default()
            .entry(scheduler_core)
            .or_default();

        // (l) The timer queue handler contends for the `channel`
        match channel.ceiling {
            None => channel.ceiling = Some(tq.priority),
            Some(ceil) => channel.ceiling = Some(cmp::max(ceil, tq.priority)),
        }

        if let Some(prio) = scheduler_prio {
            // (k) Schedulers contend for the free queue
            match *fq {
                None => *fq = Some(prio),
                Some(ceil) => *fq = Some(cmp::max(ceil, prio)),
            }

            // (m) Schedulers contend for the timer queue
            tq.ceiling = cmp::max(tq.ceiling, prio);

            // (o) core-local messages that connect tasks running at different priorities need to be
            // `Send`
            if scheduler_core == schedulee_core && schedulee_prio != prio {
                must_be_send = true;
            }
        } else if scheduler_core == schedulee_core {
            // (k, m) schedules from `init` are excluded from the ceiling analysis
            // (o) but schedules from `init` must be `Send`
            must_be_send = true;
        }

        if must_be_send {
            // the message types must be `Send`-able on both ends of the channel
            {
                let send_types = send_types.entry(scheduler_core).or_default();

                schedulee.inputs.iter().for_each(|input| {
                    send_types.insert(input.ty.clone());
                });
            }

            let send_types = send_types.entry(schedulee_core).or_default();

            schedulee.inputs.iter().for_each(|input| {
                send_types.insert(input.ty.clone());
            });
        }
    }

    // no channel should ever be empty
    debug_assert!(channels.values().all(|dispatchers| dispatchers
        .values()
        .all(|channels| channels.values().all(|channel| !channel.tasks.is_empty()))));

    // Compute channel capacities
    // (a channel's capacity is the sum of the capacities of the tasks it serves)
    for channel in channels
        .values_mut()
        .flat_map(|dispatchers| dispatchers.values_mut())
        .flat_map(|dispatcher| dispatcher.values_mut())
    {
        channel.capacity = channel
            .tasks
            .iter()
            .map(|name| app.software_tasks[name].args.capacity)
            .sum();
    }

    // Compute the capacity of the timer queues
    for tq in timer_queues.values_mut() {
        tq.capacity = tq
            .tasks
            .iter()
            .map(|name| app.software_tasks[name].args.capacity)
            .sum();
    }

    // a core is "used" if it hosts `#[init]`, `#[idle]` or at least one task
    let used_cores = app
        .inits
        .keys()
        .cloned()
        .chain(app.idles.keys().cloned())
        .chain(app.hardware_tasks.values().map(|task| task.args.core))
        .chain(app.software_tasks.values().map(|task| task.args.core))
        .collect();

    Analysis {
        used_cores,
        channels,
        free_queues,
        initialization_barriers,
        late_resources,
        locations,
        ownerships,
        send_types,
        spawn_barriers,
        sync_types,
        timer_queues,
    }
}
496
/// Priority ceiling (`None` if no prioritized task contends for the queue/channel,
/// e.g. it is only used from `init`)
pub type Ceiling = Option<u8>;

/// Task priority
pub type Priority = u8;

/// Receiver core (the core a message or late-initialized value is sent *to*)
pub type Receiver = Core;

/// Resource name
pub type Resource = Ident;

/// Sender core (the core a message or late-initialized value is sent *from*)
pub type Sender = Core;

/// Task name
pub type Task = Ident;
514
/// The result of analyzing an RTFM application
pub struct Analysis {
    /// Cores that have been assigned at least one task, `#[init]` or `#[idle]`
    pub used_cores: BTreeSet<Core>,

    /// SPSC message channels between cores
    pub channels: Channels,

    /// Priority ceilings of "free queues"
    pub free_queues: FreeQueues,

    /// Maps a core to the late resources it initializes
    pub late_resources: LateResources,

    /// Location of all *used* resources
    ///
    /// If a resource is not listed here it means that's a "dead" (never accessed) resource and the
    /// backend should not generate code for it
    ///
    /// `Location::Shared` indicates that the resource must reside in memory visible to more than
    /// one core ("shared memory")
    pub locations: Locations,

    /// Resource ownership
    pub ownerships: Ownerships,

    /// These types must implement the `Send` trait
    pub send_types: SendTypes,

    /// These types must implement the `Sync` trait
    pub sync_types: SyncTypes,

    /// Cross-core initialization barriers
    pub initialization_barriers: InitializationBarriers,

    /// Cross-core spawn barriers
    pub spawn_barriers: SpawnBarriers,

    /// Timer queues
    pub timer_queues: TimerQueues,
}
556
/// All cross-core channels, keyed by receiver core, then by dispatch priority and then by sender
/// core
pub type Channels = BTreeMap<Receiver, BTreeMap<Priority, BTreeMap<Sender, Channel>>>;

/// All free queues, keyed by task and then by sender (spawner / scheduler) core
pub type FreeQueues = IndexMap<Task, BTreeMap<Sender, Ceiling>>;

/// Late resources, keyed by the core that initializes them
pub type LateResources = BTreeMap<Core, BTreeSet<Resource>>;

/// Location of all *used* resources
pub type Locations = IndexMap<Resource, Location>;

/// Resource ownership, keyed by resource name
pub type Ownerships = IndexMap<Resource, Ownership>;

/// These types must implement the `Send` trait, keyed by the core that needs the bound
pub type SendTypes = BTreeMap<Core, Set<Box<Type>>>;

/// These types must implement the `Sync` trait, keyed by the core that needs the bound
pub type SyncTypes = BTreeMap<Core, Set<Box<Type>>>;

/// Cross-core initialization barriers: the user core waits for each initializer core
pub type InitializationBarriers =
    BTreeMap</* user */ Receiver, BTreeSet</* initializer */ Sender>>;

/// Cross-core spawn barriers; the `bool` records whether the spawn happens from `init`
pub type SpawnBarriers =
    BTreeMap</* spawnee */ Receiver, BTreeMap</* spawner */ Sender, /* before_init */ bool>>;

/// Timer queues, keyed by core
pub type TimerQueues = BTreeMap<Core, TimerQueue>;
589
/// The timer queue
#[derive(Debug)]
pub struct TimerQueue {
    /// The capacity of the queue
    ///
    /// Computed by the analysis as the sum of the `capacity` of all the tasks in `tasks`
    pub capacity: u8,

    /// The priority ceiling of the queue
    pub ceiling: u8,

    /// Priority of the timer queue handler
    pub priority: u8,

    /// Tasks that can be scheduled on this queue
    pub tasks: BTreeSet<Task>,
}
605
606impl Default for TimerQueue {
607    fn default() -> Self {
608        Self {
609            capacity: 0,
610            ceiling: 1,
611            priority: 1,
612            tasks: BTreeSet::new(),
613        }
614    }
615}
616
/// A channel between cores used to send messages
#[derive(Debug, Default)]
pub struct Channel {
    /// The channel capacity
    ///
    /// Computed by the analysis as the sum of the `capacity` of all the tasks in `tasks`
    pub capacity: u8,

    /// The (sender side) priority ceiling of this SPSC channel
    pub ceiling: Ceiling,

    /// Tasks that can be spawned on this channel
    pub tasks: BTreeSet<Task>,
}
629
/// Resource ownership
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Ownership {
    /// Owned by a single task
    Owned {
        /// Priority of the task that owns this resource
        priority: u8,
    },

    /// "Co-owned" by more than one task; all of them have the same priority
    CoOwned {
        /// Priority of the tasks that co-own this resource
        priority: u8,
    },

    /// Contended by more than one task; the tasks have different priorities
    Contended {
        /// Priority ceiling
        ceiling: u8,
    },
}

impl Ownership {
    /// Whether a task running at `priority` needs a critical section (lock) to access this
    /// resource
    pub fn needs_lock(&self, priority: u8) -> bool {
        if let Ownership::Contended { ceiling } = *self {
            // by construction the ceiling is at least as high as any accessor's priority
            debug_assert!(ceiling >= priority);

            // only tasks running *below* the ceiling must lock the resource
            priority < ceiling
        } else {
            // `Owned` / `CoOwned`: no preemption between accessors is possible
            false
        }
    }

    /// Whether this resource is exclusively owned by a single task
    pub fn is_owned(&self) -> bool {
        match *self {
            Ownership::Owned { .. } => true,
            Ownership::CoOwned { .. } | Ownership::Contended { .. } => false,
        }
    }
}
674
/// Resource location
#[derive(Clone, Debug, PartialEq)]
pub enum Location {
    /// resource that resides in `core`
    Owned {
        /// Core on which this resource is located
        core: u8,

        /// Whether this resource is initialized by a core other than the one it resides on
        cross_initialized: bool,
    },

    /// `Access::Shared` resource shared between different cores
    Shared {
        /// Cores that share access to this resource
        cores: BTreeSet<Core>,
    },
}
693
694impl Location {
695    /// If resource is owned this returns the core on which is located
696    pub fn core(&self) -> Option<u8> {
697        match *self {
698            Location::Owned { core, .. } => Some(core),
699
700            Location::Shared { .. } => None,
701        }
702    }
703
704    fn cross_initialized(&self) -> bool {
705        match *self {
706            Location::Owned {
707                cross_initialized, ..
708            } => cross_initialized,
709            Location::Shared { .. } => false,
710        }
711    }
712}