1use ecl_core::llm::{CompletionRequest, LlmProvider, Message};
4use ecl_core::{Result as EclResult, WorkflowId};
5use serde::{Deserialize, Serialize};
6use std::sync::Arc;
7
/// Input for the simple two-step (generate → critique) workflow.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SimpleWorkflowInput {
    /// Unique identifier for this workflow run.
    pub workflow_id: WorkflowId,

    /// Topic the workflow should write about.
    pub topic: String,
}
17
18impl SimpleWorkflowInput {
19 pub fn new(topic: impl Into<String>) -> Self {
21 Self {
22 workflow_id: WorkflowId::new(),
23 topic: topic.into(),
24 }
25 }
26}
27
/// Output of the simple workflow: the generated text plus its critique.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SimpleWorkflowOutput {
    /// Identifier copied from the input so callers can correlate results.
    pub workflow_id: WorkflowId,

    /// Text produced by the generation step.
    pub generated_text: String,

    /// Feedback on `generated_text` produced by the critique step.
    pub critique: String,
}
40
/// Service that orchestrates a simple two-step LLM workflow: generate
/// content for a topic, then critique the generated text.
#[derive(Clone)]
pub struct SimpleWorkflowService {
    // Shared, trait-object provider: `Arc<dyn LlmProvider>` keeps the
    // service cheaply `Clone`-able and provider-agnostic (see the mock
    // provider used in the tests below).
    llm: Arc<dyn LlmProvider>,
}
49
50impl SimpleWorkflowService {
51 pub fn new(llm: Arc<dyn LlmProvider>) -> Self {
53 Self { llm }
54 }
55
56 pub async fn run_simple(&self, input: SimpleWorkflowInput) -> EclResult<SimpleWorkflowOutput> {
58 tracing::info!(
60 workflow_id = %input.workflow_id,
61 topic = %input.topic,
62 "Starting workflow"
63 );
64
65 let generated_text = self.generate_step(&input.topic).await?;
66
67 let critique = self.critique_step(&generated_text).await?;
69
70 tracing::info!(
71 workflow_id = %input.workflow_id,
72 "Workflow completed"
73 );
74
75 Ok(SimpleWorkflowOutput {
76 workflow_id: input.workflow_id,
77 generated_text,
78 critique,
79 })
80 }
81
82 async fn generate_step(&self, topic: &str) -> EclResult<String> {
84 tracing::info!(topic = %topic, "Generating content");
85
86 let request = CompletionRequest::new(vec![Message::user(format!(
87 "Write a short paragraph about: {}",
88 topic
89 ))])
90 .with_system_prompt("You are a helpful content generator. Write clear, concise paragraphs.")
91 .with_max_tokens(500);
92
93 let response = self.llm.complete(request).await?;
94
95 tracing::info!(tokens = response.tokens_used.total(), "Content generated");
96
97 Ok(response.content)
98 }
99
100 async fn critique_step(&self, content: &str) -> EclResult<String> {
102 tracing::info!("Critiquing generated content");
103
104 let request = CompletionRequest::new(vec![Message::user(format!(
105 "Please provide constructive criticism of the following text:\n\n{}",
106 content
107 ))])
108 .with_system_prompt(
109 "You are a helpful writing critic. Provide specific, actionable feedback.",
110 )
111 .with_max_tokens(300);
112
113 let response = self.llm.complete(request).await?;
114
115 tracing::info!(tokens = response.tokens_used.total(), "Critique completed");
116
117 Ok(response.content)
118 }
119}
120
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use ecl_core::llm::MockLlmProvider;

    // Pure constructor test with no `.await`: a plain #[test] avoids
    // spinning up a Tokio runtime for nothing.
    #[test]
    fn test_simple_workflow_input_creation() {
        let input = SimpleWorkflowInput::new("Test topic");
        assert_eq!(input.topic, "Test topic");
    }

    #[tokio::test]
    async fn test_simple_workflow_execution() {
        // The mock provider returns its canned responses in order: first
        // for the generate step, then for the critique step.
        let mock_llm = Arc::new(MockLlmProvider::new(vec![
            "Generated content about the topic.".to_string(),
            "This is constructive feedback.".to_string(),
        ]));

        let service = SimpleWorkflowService::new(mock_llm);
        let input = SimpleWorkflowInput::new("Rust programming");

        let output = service.run_simple(input.clone()).await.unwrap();

        // The output must carry the input's workflow id and the two mock
        // responses, mapped to the correct steps.
        assert_eq!(output.workflow_id, input.workflow_id);
        assert_eq!(output.generated_text, "Generated content about the topic.");
        assert_eq!(output.critique, "This is constructive feedback.");
    }
}