// praxis_graph/types/output.rs

use praxis_llm::ToolCall;

3/// Graph output items from LLM execution
4/// 
5/// Represents structured outputs that can be persisted and traced separately.
6/// This is distinct from `praxis_llm::openai::OutputItem` which is the raw API format.
7#[derive(Debug, Clone)]
8pub enum GraphOutput {
9    /// Reasoning output from models like GPT-5, o1
10    Reasoning {
11        id: String,
12        content: String,
13    },
14    /// Regular message output
15    Message {
16        id: String,
17        content: String,
18        tool_calls: Option<Vec<ToolCall>>,
19    },
20}
21
22impl GraphOutput {
23    pub fn reasoning(id: impl Into<String>, content: impl Into<String>) -> Self {
24        Self::Reasoning {
25            id: id.into(),
26            content: content.into(),
27        }
28    }
29    
30    pub fn message(id: impl Into<String>, content: impl Into<String>) -> Self {
31        Self::Message {
32            id: id.into(),
33            content: content.into(),
34            tool_calls: None,
35        }
36    }
37    
38    pub fn message_with_tools(
39        id: impl Into<String>,
40        content: impl Into<String>,
41        tool_calls: Vec<ToolCall>,
42    ) -> Self {
43        Self::Message {
44            id: id.into(),
45            content: content.into(),
46            tool_calls: Some(tool_calls),
47        }
48    }
49    
50    pub fn id(&self) -> &str {
51        match self {
52            Self::Reasoning { id, .. } => id,
53            Self::Message { id, .. } => id,
54        }
55    }
56    
57    pub fn content(&self) -> &str {
58        match self {
59            Self::Reasoning { content, .. } => content,
60            Self::Message { content, .. } => content,
61        }
62    }
63}
64