// daedalus_planner/graph.rs
1use std::collections::BTreeMap;
2
3use serde::{Deserialize, Serialize};
4
5use crate::diagnostics::Diagnostic;
6
/// Default execution-plan version for deterministic serde/goldens.
///
/// Stamped into [`ExecutionPlan::version`] by [`ExecutionPlan::new`].
pub const DEFAULT_PLAN_VERSION: &str = "0.1";
9
10/// Compute affinity hint for scheduling/GPU pass.
11pub use daedalus_core::compute::ComputeAffinity;
12/// Sync grouping metadata.
13#[allow(unused_imports)]
14pub use daedalus_core::sync::{SyncGroup, SyncPolicy};
15
/// Stable hash helper used for goldens; simple FNV-1a for determinism.
///
/// The wrapped `u64` is the raw digest. `Default` yields `StableHash(0)`,
/// which [`StableHash::from_bytes`] never produces for empty input (the FNV
/// offset basis is non-zero).
///
/// ```
/// use daedalus_planner::StableHash;
/// let hash = StableHash::from_bytes(b"demo");
/// assert_ne!(hash.0, 0);
/// ```
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct StableHash(pub u64);
25
26impl StableHash {
27 pub fn from_bytes(bytes: &[u8]) -> Self {
28 const FNV_OFFSET: u64 = 0xcbf29ce484222325;
29 const FNV_PRIME: u64 = 0x100000001b3;
30 let mut hash = FNV_OFFSET;
31 for b in bytes {
32 hash ^= *b as u64;
33 hash = hash.wrapping_mul(FNV_PRIME);
34 }
35 StableHash(hash)
36 }
37}
38
/// Node reference within a graph (index-based for compactness).
///
/// The wrapped `usize` is a zero-based index into [`Graph::nodes`].
///
/// ```
/// use daedalus_planner::NodeRef;
/// let node = NodeRef(3);
/// assert_eq!(node.0, 3);
/// ```
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct NodeRef(pub usize);
48
/// Port reference by name within a node.
///
/// ```
/// use daedalus_planner::{NodeRef, PortRef};
/// let port = PortRef { node: NodeRef(0), port: "out".into() };
/// assert_eq!(port.port, "out");
/// ```
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct PortRef {
    /// Node the port belongs to (index into `Graph::nodes`).
    pub node: NodeRef,
    /// Port name on that node.
    pub port: String,
}
61
/// Edge from one node/port to another.
///
/// ```
/// use daedalus_planner::{Edge, NodeRef, PortRef};
/// let edge = Edge {
///     from: PortRef { node: NodeRef(0), port: "out".into() },
///     to: PortRef { node: NodeRef(1), port: "in".into() },
///     metadata: Default::default(),
/// };
/// assert_eq!(edge.from.port, "out");
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Edge {
    /// Source node/port.
    pub from: PortRef,
    /// Destination node/port.
    pub to: PortRef,
    /// Arbitrary typed edge metadata; omitted from serialization when empty.
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub metadata: BTreeMap<String, daedalus_data::model::Value>,
}
80
/// An instantiated node, identified by registry id.
///
/// ```
/// use daedalus_planner::{ComputeAffinity, NodeInstance};
/// use daedalus_registry::ids::NodeId;
/// let node = NodeInstance {
///     id: NodeId::new("demo.node"),
///     bundle: None,
///     label: None,
///     inputs: vec![],
///     outputs: vec![],
///     compute: ComputeAffinity::CpuOnly,
///     const_inputs: vec![],
///     sync_groups: vec![],
///     metadata: Default::default(),
/// };
/// assert_eq!(node.id.0, "demo.node");
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NodeInstance {
    /// Registry id naming the node implementation to instantiate.
    pub id: daedalus_registry::ids::NodeId,
    /// Optional bundle identifier. NOTE(review): semantics (owning bundle vs.
    /// source bundle) not visible here — confirm against the registry crate.
    pub bundle: Option<String>,
    /// Optional human-readable label for display/debugging.
    pub label: Option<String>,
    /// Input port names on this node.
    pub inputs: Vec<String>,
    /// Output port names on this node.
    pub outputs: Vec<String>,
    /// Compute affinity hint consumed by the GPU pass (`Graph::gpu_buffers`).
    #[serde(default)]
    pub compute: ComputeAffinity,
    /// Constant values bound directly to input ports as (port name, value) pairs.
    #[serde(default)]
    pub const_inputs: Vec<(String, daedalus_data::model::Value)>,
    /// Sync grouping metadata; omitted from serialization when empty.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub sync_groups: Vec<SyncGroup>,
    /// Arbitrary typed per-node metadata; omitted from serialization when empty.
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub metadata: BTreeMap<String, daedalus_data::model::Value>,
}
115
/// Planner input graph (pre-pass).
///
/// ```
/// use daedalus_planner::Graph;
/// let graph = Graph::default();
/// assert!(graph.nodes.is_empty());
/// ```
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct Graph {
    /// Node instances; edges refer to them by index (see [`NodeRef`]).
    pub nodes: Vec<NodeInstance>,
    /// Directed connections between node ports.
    pub edges: Vec<Edge>,
    /// Arbitrary metadata retained for goldens/debug.
    pub metadata: BTreeMap<String, String>,
    /// Graph-level metadata (typed values) that should be visible to nodes at runtime.
    ///
    /// This is distinct from `metadata` (string-only planner debug/golden output). Planner passes
    /// may write to `metadata`; graph authors should write to `metadata_values`.
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub metadata_values: BTreeMap<String, daedalus_data::model::Value>,
}
136
/// Contiguous GPU segment metadata.
///
/// Produced by `Graph::gpu_buffers`: one segment per connected GPU chain.
///
/// ```
/// use daedalus_planner::{GpuSegment, NodeRef};
/// let seg = GpuSegment { buffer_id: 0, nodes: vec![NodeRef(0)] };
/// assert_eq!(seg.nodes.len(), 1);
/// ```
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct GpuSegment {
    /// Shared buffer id; dense and ascending across the segments returned
    /// by `Graph::gpu_buffers`.
    pub buffer_id: usize,
    /// Member nodes, sorted by node index.
    pub nodes: Vec<NodeRef>,
}
149
/// Edge buffer hints used by the GPU pass.
///
/// Produced by `Graph::gpu_buffers`: one entry per edge, in edge order.
///
/// ```
/// use daedalus_planner::EdgeBufferInfo;
/// let info = EdgeBufferInfo { edge_index: 0, gpu_fast_path: false, buffer_id: None };
/// assert_eq!(info.edge_index, 0);
/// ```
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct EdgeBufferInfo {
    /// Index into `Graph::edges`.
    pub edge_index: usize,
    /// True when both endpoints are GPU-capable, meaning the edge can reuse a GPU buffer.
    pub gpu_fast_path: bool,
    /// Buffer id used when `gpu_fast_path` is true; `None` otherwise.
    pub buffer_id: Option<usize>,
}
166
167impl Graph {
168 /// Identify contiguous GPU-to-GPU chains and assign them shared buffer ids, along with
169 /// edge annotations that mark where GPU fast paths can be used.
170 ///
171 /// ```
172 /// use daedalus_planner::{Graph, NodeInstance, ComputeAffinity, Edge, PortRef, NodeRef};
173 /// use daedalus_registry::ids::NodeId;
174 ///
175 /// let mut graph = Graph::default();
176 /// graph.nodes.push(NodeInstance {
177 /// id: NodeId::new("a"),
178 /// bundle: None,
179 /// label: None,
180 /// inputs: vec![],
181 /// outputs: vec!["out".into()],
182 /// compute: ComputeAffinity::GpuPreferred,
183 /// const_inputs: vec![],
184 /// sync_groups: vec![],
185 /// metadata: Default::default(),
186 /// });
187 /// graph.nodes.push(NodeInstance {
188 /// id: NodeId::new("b"),
189 /// bundle: None,
190 /// label: None,
191 /// inputs: vec!["in".into()],
192 /// outputs: vec![],
193 /// compute: ComputeAffinity::GpuPreferred,
194 /// const_inputs: vec![],
195 /// sync_groups: vec![],
196 /// metadata: Default::default(),
197 /// });
198 /// graph.edges.push(Edge {
199 /// from: PortRef { node: NodeRef(0), port: "out".into() },
200 /// to: PortRef { node: NodeRef(1), port: "in".into() },
201 /// metadata: Default::default(),
202 /// });
203 /// let (segments, edges) = graph.gpu_buffers();
204 /// assert_eq!(edges.len(), 1);
205 /// assert_eq!(segments.len(), 1);
206 /// ```
207 pub fn gpu_buffers(&self) -> (Vec<GpuSegment>, Vec<EdgeBufferInfo>) {
208 #[derive(Clone)]
209 struct Dsu {
210 parent: Vec<usize>,
211 }
212 impl Dsu {
213 fn new(n: usize) -> Self {
214 Self {
215 parent: (0..n).collect(),
216 }
217 }
218 fn find(&mut self, x: usize) -> usize {
219 if self.parent[x] != x {
220 let p = self.parent[x];
221 self.parent[x] = self.find(p);
222 }
223 self.parent[x]
224 }
225 fn union(&mut self, a: usize, b: usize) {
226 let ra = self.find(a);
227 let rb = self.find(b);
228 if ra != rb {
229 self.parent[rb] = ra;
230 }
231 }
232 }
233
234 let mut dsu = Dsu::new(self.nodes.len());
235 for e in &self.edges {
236 let from = &self.nodes[e.from.node.0];
237 let to = &self.nodes[e.to.node.0];
238 let gpu_gpu = matches!(
239 from.compute,
240 ComputeAffinity::GpuPreferred | ComputeAffinity::GpuRequired
241 ) && matches!(
242 to.compute,
243 ComputeAffinity::GpuPreferred | ComputeAffinity::GpuRequired
244 );
245 if gpu_gpu {
246 dsu.union(e.from.node.0, e.to.node.0);
247 }
248 }
249
250 let mut root_to_buf = BTreeMap::new();
251 let mut node_buf: Vec<Option<usize>> = vec![None; self.nodes.len()];
252 for (idx, n) in self.nodes.iter().enumerate() {
253 if matches!(
254 n.compute,
255 ComputeAffinity::GpuPreferred | ComputeAffinity::GpuRequired
256 ) {
257 let root = dsu.find(idx);
258 let buf_id = match root_to_buf.get(&root) {
259 Some(id) => *id,
260 None => {
261 let next = root_to_buf.len();
262 root_to_buf.insert(root, next);
263 next
264 }
265 };
266 node_buf[idx] = Some(buf_id);
267 }
268 }
269
270 let mut segments = Vec::new();
271 for (root, buf_id) in root_to_buf {
272 let mut members: Vec<NodeRef> = self
273 .nodes
274 .iter()
275 .enumerate()
276 .filter(|(i, _)| dsu.find(*i) == root)
277 .map(|(i, _)| NodeRef(i))
278 .collect();
279 members.sort_by_key(|nr| nr.0);
280 segments.push(GpuSegment {
281 buffer_id: buf_id,
282 nodes: members,
283 });
284 }
285 segments.sort_by_key(|s| s.buffer_id);
286
287 let mut edges = Vec::new();
288 for (i, e) in self.edges.iter().enumerate() {
289 let from = &self.nodes[e.from.node.0];
290 let to = &self.nodes[e.to.node.0];
291 let gpu_gpu = matches!(
292 from.compute,
293 ComputeAffinity::GpuPreferred | ComputeAffinity::GpuRequired
294 ) && matches!(
295 to.compute,
296 ComputeAffinity::GpuPreferred | ComputeAffinity::GpuRequired
297 );
298 let buffer_id = if gpu_gpu {
299 node_buf[e.from.node.0]
300 } else {
301 None
302 };
303 edges.push(EdgeBufferInfo {
304 edge_index: i,
305 gpu_fast_path: gpu_gpu,
306 buffer_id,
307 });
308 }
309
310 (segments, edges)
311 }
312}
313
/// Final execution plan with diagnostics and stable hash for goldens.
///
/// ```
/// use daedalus_planner::{ExecutionPlan, Graph};
/// let plan = ExecutionPlan::new(Graph::default(), vec![]);
/// assert_eq!(plan.graph.nodes.len(), 0);
/// ```
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct ExecutionPlan {
    /// Plan format version; `ExecutionPlan::new` stamps [`DEFAULT_PLAN_VERSION`].
    pub version: String,
    /// The graph this plan executes.
    pub graph: Graph,
    /// Diagnostics accumulated during planning.
    pub diagnostics: Vec<Diagnostic>,
    /// Stable FNV-1a hash over the graph structure (see `ExecutionPlan::new`).
    pub hash: StableHash,
}
328
329impl ExecutionPlan {
330 /// Build a plan and compute its stable hash.
331 pub fn new(graph: Graph, diagnostics: Vec<Diagnostic>) -> Self {
332 let mut bytes = Vec::new();
333 bytes.extend_from_slice(&(graph.nodes.len() as u64).to_le_bytes());
334 bytes.extend_from_slice(&(graph.edges.len() as u64).to_le_bytes());
335 for n in &graph.nodes {
336 bytes.extend_from_slice(n.id.0.as_bytes());
337 bytes.push(match n.compute {
338 ComputeAffinity::CpuOnly => 0,
339 ComputeAffinity::GpuPreferred => 1,
340 ComputeAffinity::GpuRequired => 2,
341 });
342 }
343 for e in &graph.edges {
344 bytes.extend_from_slice(&(e.from.node.0 as u64).to_le_bytes());
345 bytes.extend_from_slice(&(e.to.node.0 as u64).to_le_bytes());
346 bytes.extend_from_slice(e.from.port.as_bytes());
347 bytes.extend_from_slice(e.to.port.as_bytes());
348 }
349 let hash = StableHash::from_bytes(&bytes);
350 Self {
351 version: DEFAULT_PLAN_VERSION.to_string(),
352 graph,
353 diagnostics,
354 hash,
355 }
356 }
357}