// fret_ui/tree/ui_tree_view_cache.rs

use super::*;

impl<H: UiHost> UiTree<H> {
    /// Set the paint-cache policy for this tree (see [`Self::paint_cache_enabled`]
    /// for how the policy is resolved against inspection state).
    pub fn set_paint_cache_policy(&mut self, policy: PaintCachePolicy) {
        self.paint_cache_policy = policy;
    }
7
    /// The currently configured paint-cache policy (unresolved; `Auto` is
    /// returned as-is).
    pub fn paint_cache_policy(&self) -> PaintCachePolicy {
        self.paint_cache_policy
    }
11
    /// Globally enable or disable view caching for this tree.
    pub fn set_view_cache_enabled(&mut self, enabled: bool) {
        self.view_cache_enabled = enabled;
    }
15
    /// Whether view caching is globally enabled (note: caching may still be
    /// suspended while inspection is active — see `view_cache_active`).
    pub fn view_cache_enabled(&self) -> bool {
        self.view_cache_enabled
    }
19
    /// Mark an inspection session as active/inactive. While active, both the
    /// `Auto` paint-cache policy and view caching resolve to "off".
    pub fn set_inspection_active(&mut self, active: bool) {
        self.inspection_active = active;
    }
23
    /// Whether an inspection session is currently active.
    pub fn inspection_active(&self) -> bool {
        self.inspection_active
    }
27
28    pub fn set_paint_cache_enabled(&mut self, enabled: bool) {
29        self.set_paint_cache_policy(if enabled {
30            PaintCachePolicy::Enabled
31        } else {
32            PaintCachePolicy::Disabled
33        });
34    }
35
36    pub fn paint_cache_enabled(&self) -> bool {
37        match self.paint_cache_policy {
38            PaintCachePolicy::Auto => !self.inspection_active,
39            PaintCachePolicy::Enabled => true,
40            PaintCachePolicy::Disabled => false,
41        }
42    }
43
    /// Ingest the previous frame's recorded ops from `scene` for paint-cache replay.
    ///
    /// Call this **before** clearing `scene` for the next frame.
    ///
    /// Important:
    /// - This method is destructive: it swaps the scene op storage into the UI tree. Do not call
    ///   it more than once for the same `Scene` before `Scene::clear()`.
    /// - `scene` must contain the previous frame ops that were produced by **this** `UiTree`.
    /// - The paint cache records absolute op index ranges into the previous frame ops vector, so
    ///   sharing a single `Scene` across multiple `UiTree`s is not compatible with paint-cache
    ///   ingestion unless each tree records into an isolated scene.
    pub fn ingest_paint_cache_source(&mut self, scene: &mut Scene) {
        // Swap rather than copy: the tree takes ownership of the scene's op
        // storage and fingerprint, and the scene receives the tree's old buffers.
        scene.swap_storage(
            &mut self.paint_cache.prev_ops,
            &mut self.paint_cache.prev_fingerprint,
        );
    }
61
62    pub(in crate::tree) fn view_cache_active(&self) -> bool {
63        self.view_cache_enabled && !self.inspection_active
64    }
65
66    pub(in crate::tree) fn nearest_view_cache_root(&self, node: NodeId) -> Option<NodeId> {
67        let mut current = Some(node);
68        while let Some(id) = current {
69            let n = self.nodes.get(id)?;
70            if n.view_cache.enabled {
71                return Some(id);
72            }
73            current = n.parent;
74        }
75        None
76    }
77
78    pub(in crate::tree) fn mark_cache_root_dirty(
79        &mut self,
80        root: NodeId,
81        source: UiDebugInvalidationSource,
82        detail: UiDebugInvalidationDetail,
83    ) {
84        self.dirty_cache_roots.insert(root);
85        self.dirty_cache_root_reasons.insert(root, (source, detail));
86    }
87
88    pub(crate) fn should_reuse_view_cache_node(&self, node: NodeId) -> bool {
89        if !self.view_cache_active() {
90            return false;
91        }
92        let Some(n) = self.nodes.get(node) else {
93            return false;
94        };
95        if !n.view_cache.enabled {
96            return false;
97        }
98        if n.view_cache_needs_rerender {
99            return false;
100        }
101        // View-cache reuse is an authoring-level "skip re-render" decision, not a "skip repaint"
102        // decision: paint invalidations (e.g. hover/focus) should not force a child render pass.
103        if !n.invalidation.layout {
104            return true;
105        }
106
107        // Layout invalidations are only safe to ignore for cache roots that opt into contained
108        // layout behavior with definite (non-auto) sizing and known bounds.
109        //
110        // This mirrors the same conditions used by invalidation propagation to truncate at cache
111        // boundaries.
112        n.view_cache.contained_layout
113            && n.view_cache.layout_definite
114            && n.bounds.size != Size::default()
115    }
116
117    pub(crate) fn view_cache_node_needs_rerender(&self, node: NodeId) -> bool {
118        self.nodes
119            .get(node)
120            .is_some_and(|n| n.view_cache_needs_rerender)
121    }
122
123    /// Configure view-cache behavior for a specific node.
124    ///
125    /// This is an advanced/low-level knob. Most applications should prefer declarative
126    /// view-cache boundaries, but retained widgets (and diagnostics harnesses) may need to enable
127    /// view caching explicitly on a node.
128    pub fn set_node_view_cache_flags(
129        &mut self,
130        node: NodeId,
131        enabled: bool,
132        contained_layout: bool,
133        layout_definite: bool,
134    ) {
135        if let Some(n) = self.nodes.get_mut(node) {
136            let next = ViewCacheFlags {
137                enabled,
138                contained_layout,
139                layout_definite,
140            };
141            if n.view_cache == next {
142                return;
143            }
144            n.view_cache = next;
145        }
146    }
147
148    pub(crate) fn set_node_view_cache_needs_rerender(&mut self, node: NodeId, needs: bool) {
149        if let Some(n) = self.nodes.get_mut(node) {
150            n.view_cache_needs_rerender = needs;
151        }
152        if !needs {
153            self.dirty_cache_roots.remove(&node);
154            self.dirty_cache_root_reasons.remove(&node);
155        }
156    }
157
158    pub(in crate::tree) fn clear_cache_root_dirty_tracking_if_clean(&mut self, node: NodeId) {
159        let should_clear = self
160            .nodes
161            .get(node)
162            .is_none_or(|n| !n.view_cache_needs_rerender && !n.invalidation.layout);
163        if should_clear {
164            self.dirty_cache_roots.remove(&node);
165            self.dirty_cache_root_reasons.remove(&node);
166        }
167    }
168
169    pub(in crate::tree) fn mark_view_cache_roots_needs_rerender_from_snapshot(
170        &mut self,
171        start: NodeId,
172        snapshot: Option<&UiDispatchSnapshot>,
173        source: UiDebugInvalidationSource,
174        detail: UiDebugInvalidationDetail,
175    ) {
176        if !self.view_cache_active() {
177            return;
178        }
179
180        let mut current = Some(start);
181        while let Some(id) = current {
182            let next = match snapshot {
183                Some(snapshot) => snapshot.parent.get(id).copied().flatten(),
184                None => self.nodes.get(id).and_then(|n| n.parent),
185            };
186
187            if let Some(n) = self.nodes.get_mut(id)
188                && n.view_cache.enabled
189            {
190                n.view_cache_needs_rerender = true;
191                self.mark_cache_root_dirty(id, source, detail);
192            }
193
194            current = next;
195        }
196    }
197
198    /// Mark the nearest view-cache root as "needs rerender" without forcing a layout invalidation walk.
199    ///
200    /// This is intended for barrier-driven widgets (virtual lists, scroll content, etc.) that can
201    /// detect a logical "window mismatch" during layout and need the *next frame* to rerun the
202    /// declarative render closure to rebuild children, but do not benefit from triggering an
203    /// additional contained relayout pass in the *current* frame.
204    pub(crate) fn mark_nearest_view_cache_root_needs_rerender(
205        &mut self,
206        node: NodeId,
207        source: UiDebugInvalidationSource,
208        detail: UiDebugInvalidationDetail,
209    ) {
210        if !self.view_cache_active() {
211            return;
212        }
213
214        if !Self::invalidation_marks_view_dirty(source, Invalidation::HitTestOnly, detail) {
215            return;
216        }
217
218        let Some(root) = self.nearest_view_cache_root(node) else {
219            return;
220        };
221
222        let mut current: Option<NodeId> = Some(root);
223        while let Some(id) = current {
224            let next_parent = self.nodes.get(id).and_then(|n| n.parent);
225            if let Some(n) = self.nodes.get_mut(id)
226                && n.view_cache.enabled
227            {
228                n.view_cache_needs_rerender = true;
229                self.mark_cache_root_dirty(id, source, detail);
230            }
231            current = next_parent;
232        }
233    }
234
235    /// Repair invalidation propagation for newly mounted auto-sized cache roots.
236    ///
237    /// During declarative mounting we may discover `ViewCache` roots before their parent pointers
238    /// are fully connected. When view caching is active, invalidation propagation can be
239    /// truncated at cache roots, and a cache root that is only marked dirty on itself may never be
240    /// laid out by its (still-clean) ancestors. This shows up as cache-root subtrees stuck at
241    /// `Rect::default()` origins (e.g. scripted clicks using semantics bounds land in the wrong
242    /// place).
243    ///
244    /// Call this after `repair_parent_pointers_from_layer_roots()` and before `layout_all` so the
245    /// next layout pass walks far enough to place newly mounted cache-root subtrees.
246    pub(crate) fn propagate_auto_sized_view_cache_root_invalidations(&mut self) {
247        if !self.view_cache_active() {
248            return;
249        }
250
251        let targets: Vec<NodeId> = self
252            .nodes
253            .iter()
254            .filter_map(|(id, n)| {
255                (n.view_cache.enabled
256                    && n.view_cache.contained_layout
257                    && !n.view_cache.layout_definite
258                    && n.bounds.size == Size::default()
259                    && (n.invalidation.layout || n.invalidation.hit_test))
260                    .then_some(id)
261            })
262            .collect();
263
264        for root in targets {
265            self.mark_invalidation_with_source(
266                root,
267                Invalidation::HitTest,
268                UiDebugInvalidationSource::Other,
269            );
270        }
271    }
272
273    fn collapse_observation_index_to_view_cache_roots(
274        &self,
275        mut index: ObservationIndex,
276    ) -> ObservationIndex {
277        let mut per_root: HashMap<NodeId, HashMap<ModelId, ObservationMask>> = HashMap::new();
278        for (node, entries) in index.by_node.drain() {
279            let target = self.nearest_view_cache_root(node).unwrap_or(node);
280            let models = per_root.entry(target).or_default();
281            for (model, mask) in entries {
282                models
283                    .entry(model)
284                    .and_modify(|m| *m = m.union(mask))
285                    .or_insert(mask);
286            }
287        }
288
289        let mut out = ObservationIndex::default();
290        for (node, models) in per_root {
291            let mut list: Vec<(ModelId, ObservationMask)> = Vec::with_capacity(models.len());
292            for (model, mask) in models {
293                list.push((model, mask));
294            }
295            out.by_node.insert(node, list.clone());
296            for (model, mask) in list {
297                out.by_model.entry(model).or_default().insert(node, mask);
298            }
299        }
300        out
301    }
302
303    fn collapse_global_observation_index_to_view_cache_roots(
304        &self,
305        mut index: GlobalObservationIndex,
306    ) -> GlobalObservationIndex {
307        let mut per_root: HashMap<NodeId, HashMap<TypeId, ObservationMask>> = HashMap::new();
308        for (node, entries) in index.by_node.drain() {
309            let target = self.nearest_view_cache_root(node).unwrap_or(node);
310            let globals = per_root.entry(target).or_default();
311            for (global, mask) in entries {
312                globals
313                    .entry(global)
314                    .and_modify(|m| *m = m.union(mask))
315                    .or_insert(mask);
316            }
317        }
318
319        let mut out = GlobalObservationIndex::default();
320        for (node, globals) in per_root {
321            let mut list: Vec<(TypeId, ObservationMask)> = Vec::with_capacity(globals.len());
322            for (global, mask) in globals {
323                list.push((global, mask));
324            }
325            out.by_node.insert(node, list.clone());
326            for (global, mask) in list {
327                out.by_global.entry(global).or_default().insert(node, mask);
328            }
329        }
330        out
331    }
332
333    pub(in crate::tree) fn collapse_layout_observations_to_view_cache_roots_if_needed(&mut self) {
334        if !self.view_cache_active() {
335            return;
336        }
337        let observed_in_layout = std::mem::take(&mut self.observed_in_layout);
338        self.observed_in_layout =
339            self.collapse_observation_index_to_view_cache_roots(observed_in_layout);
340
341        let observed_globals_in_layout = std::mem::take(&mut self.observed_globals_in_layout);
342        self.observed_globals_in_layout =
343            self.collapse_global_observation_index_to_view_cache_roots(observed_globals_in_layout);
344    }
345
346    pub(in crate::tree) fn collapse_paint_observations_to_view_cache_roots_if_needed(&mut self) {
347        if !self.view_cache_active() {
348            return;
349        }
350        let observed_in_paint = std::mem::take(&mut self.observed_in_paint);
351        self.observed_in_paint =
352            self.collapse_observation_index_to_view_cache_roots(observed_in_paint);
353
354        let observed_globals_in_paint = std::mem::take(&mut self.observed_globals_in_paint);
355        self.observed_globals_in_paint =
356            self.collapse_global_observation_index_to_view_cache_roots(observed_globals_in_paint);
357    }
358
359    pub(in crate::tree) fn expand_view_cache_layout_invalidations_if_needed(&mut self) {
360        if !self.view_cache_active() {
361            return;
362        }
363        let targets: Vec<NodeId> = self
364            .nodes
365            .iter()
366            .filter_map(|(id, n)| (n.view_cache.enabled && n.invalidation.layout).then_some(id))
367            .collect();
368        if targets.is_empty() {
369            return;
370        }
371        for root in targets {
372            self.mark_view_cache_layout_dirty_subtree(root);
373        }
374    }
375
    /// Iterative depth-first walk that marks `root` and every descendant as
    /// layout-dirty, keeping the layout-transition counter and the aggregate
    /// invalidation counters in sync per node, then rebuilds the subtree
    /// layout-dirty counts upward from `root`.
    fn mark_view_cache_layout_dirty_subtree(&mut self, root: NodeId) {
        let mut stack: Vec<NodeId> = vec![root];
        while let Some(id) = stack.pop() {
            // Inner scope bounds the mutable borrow of the node: mutate and
            // snapshot before/after state here, then release the borrow before
            // updating the counters on `self` below.
            let (prev, next, layout_before, layout_after) = {
                let Some(n) = self.nodes.get_mut(id) else {
                    // Stale child id — skip it.
                    continue;
                };
                let prev = n.invalidation;
                let layout_before = n.invalidation.layout;
                n.invalidation.mark(Invalidation::Layout);
                let next = n.invalidation;
                let layout_after = n.invalidation.layout;
                // Push children while the node is borrowed; they are processed
                // on later iterations.
                for &child in &n.children {
                    stack.push(child);
                }
                (prev, next, layout_before, layout_after)
            };
            record_layout_invalidation_transition(
                &mut self.layout_invalidations_count,
                layout_before,
                layout_after,
            );
            self.update_invalidation_counters(prev, next);
        }

        self.rebuild_subtree_layout_dirty_counts_and_propagate(root);
    }
}
403}