use super::layer_trait::Layer;
use super::types::GenericLayer;
use crate::config::Config;
use crate::session::Session;
use crate::utils::file_parser::has_context_blocks;
use crate::utils::file_renderer::expand_context_blocks;
use anyhow::Result;
use colored::*;
use std::io::IsTerminal;
/// Runs a configurable pipeline of processing layers for a role.
///
/// Built from `Config` (see `from_config` / `from_config_with_processed_prompts`);
/// `process` feeds an input string through every layer in order.
pub struct LayeredOrchestrator {
// Ordered pipeline; empty when layering is disabled for the role.
pub layers: Vec<Box<dyn Layer + Send + Sync>>,
}
impl LayeredOrchestrator {
/// Builds the layer pipeline for `role` from `config`.
///
/// Returns an empty pipeline when `enable_layers` is false for the role.
///
/// # Panics
/// Panics when `enable_layers` is true but no layers are configured or
/// enabled — running with a silently empty pipeline would bypass all layer
/// processing, which is treated as a fatal configuration error.
pub fn from_config(config: &Config, role: &str) -> Self {
    let (role_config, _, _, _, _) = config.get_role_config(role);
    if !role_config.enable_layers {
        return Self { layers: Vec::new() };
    }
    // Wrap every enabled layer config in the generic layer implementation.
    let layers: Vec<Box<dyn Layer + Send + Sync>> = config
        .get_enabled_layers_for_role(role)
        .into_iter()
        .map(|layer_config| Box::new(GenericLayer::new(layer_config)) as Box<dyn Layer + Send + Sync>)
        .collect();
    // `enable_layers` is known to be true past the early return above, so the
    // original `&& role_config.enable_layers` guard was redundant and is gone.
    if layers.is_empty() {
        panic!("CRITICAL CONFIG ERROR: Role '{}' has enable_layers=true but no layers are configured or enabled. Define layers in config or set enable_layers=false.", role);
    }
    Self { layers }
}
/// Builds the layer pipeline for `role`, additionally processing each layer's
/// system prompt (async; may resolve references relative to `project_dir`).
///
/// Returns an empty pipeline when `enable_layers` is false for the role.
///
/// # Panics
/// Panics when `enable_layers` is true but no layers are configured or
/// enabled, mirroring [`LayeredOrchestrator::from_config`].
pub async fn from_config_with_processed_prompts(
    config: &Config,
    role: &str,
    project_dir: &std::path::Path,
) -> Self {
    let (role_config, _, _, _, _) = config.get_role_config(role);
    if !role_config.enable_layers {
        return Self { layers: Vec::new() };
    }
    let mut layers: Vec<Box<dyn Layer + Send + Sync>> = Vec::new();
    for mut layer_config in config.get_enabled_layers_for_role(role) {
        // Expand/process the layer's system prompt before constructing the
        // layer, so the pipeline sees the final prompt text.
        crate::session::helper_functions::process_layer_system_prompt(
            &mut layer_config,
            project_dir,
        )
        .await;
        layers.push(Box::new(GenericLayer::new(layer_config)));
    }
    // `enable_layers` is known true here; the original trailing
    // `&& role_config.enable_layers` check was redundant and is removed.
    if layers.is_empty() {
        panic!("CRITICAL CONFIG ERROR: Role '{}' has enable_layers=true but no layers are configured or enabled. Define layers in config or set enable_layers=false.", role);
    }
    Self { layers }
}
/// Runs `input` through every layer in order, feeding each layer's last
/// output into the next, and returns the final pipeline output.
///
/// Per-layer token usage and cost are printed and accumulated into `session`
/// statistics; each layer's `output_mode` decides how its outputs mutate the
/// session message history. Cancellation is honoured before and after every
/// layer.
///
/// # Errors
/// Returns an error when `operation_cancelled` signals true, or when a
/// layer's own `process` fails.
pub async fn process(
    &self,
    input: &str,
    session: &mut Session,
    config: &Config,
    role: &str,
    operation_cancelled: tokio::sync::watch::Receiver<bool>,
) -> Result<String> {
    // An empty pipeline is the identity transform.
    if self.layers.is_empty() {
        return Ok(input.to_string());
    }
    let mut current_input = input.to_string();
    let mut total_input_tokens = 0;
    let mut total_output_tokens = 0;
    let mut total_cost = 0.0;
    // On a TTY the pipeline banner is debug-only; when output is piped,
    // always emit it so logs stay self-describing.
    if std::io::stdin().is_terminal() {
        if config.get_log_level().is_debug_enabled() {
            println!(
                "{}",
                format!("Layer Processing Pipeline ({} layers)", self.layers.len())
                    .bright_cyan()
            );
        }
    } else {
        println!(
            "{}",
            format!("Layer Processing Pipeline ({} layers)", self.layers.len()).bright_cyan()
        );
    }
    for layer in &self.layers {
        if *operation_cancelled.borrow() {
            return Err(anyhow::anyhow!("Operation cancelled"));
        }
        let layer_name = layer.name();
        // Progress indicator: carriage-return overwrite on a TTY, one plain
        // line per layer otherwise.
        if std::io::stdin().is_terminal() {
            print!("\r{} {} ", "→".bright_yellow(), layer_name.bright_white());
            use std::io::Write;
            std::io::stdout().flush().ok();
        } else {
            println!("Processing: {} (${:.5})", layer_name, total_cost);
        }
        // Announce which MCP tools this layer may use (debug-only on a TTY).
        if !layer.config().mcp.server_refs.is_empty() {
            if std::io::stdin().is_terminal() {
                if config.get_log_level().is_debug_enabled() {
                    if layer.config().mcp.allowed_tools.is_empty() {
                        println!("{}", "All tools enabled for this layer".bright_magenta());
                    } else {
                        println!(
                            "{} {}",
                            "Tools enabled:".bright_magenta(),
                            layer.config().mcp.allowed_tools.join(", ")
                        );
                    }
                }
            } else {
                if layer.config().mcp.allowed_tools.is_empty() {
                    println!("{}", "All tools enabled for this layer".bright_magenta());
                } else {
                    println!(
                        "{} {}",
                        "Tools enabled:".bright_magenta(),
                        layer.config().mcp.allowed_tools.join(", ")
                    );
                }
            }
        }
        let result = layer
            .process(&current_input, session, config, operation_cancelled.clone())
            .await?;
        if *operation_cancelled.borrow() {
            return Err(anyhow::anyhow!("Operation cancelled"));
        }
        self.display_layer_outputs(&result.outputs, layer_name);
        // Cost accounting: prefer structured `usage.cost`, fall back to the
        // raw provider response, and loudly report when neither is present.
        if let Some(usage) = &result.token_usage {
            if let Some(cost) = usage.cost {
                println!(
                    "{}",
                    format!(
                        "${:.5} | {} tokens | {}ms",
                        cost,
                        usage.prompt_tokens + usage.output_tokens,
                        result.total_time_ms
                    )
                    .bright_magenta()
                );
                use std::io::Write;
                std::io::stdout().flush().ok();
                session.add_layer_stats_with_time(
                    layer_name,
                    &layer.config().get_effective_model(&session.info.model),
                    usage.prompt_tokens,
                    usage.output_tokens,
                    cost,
                    result.api_time_ms,
                    result.tool_time_ms,
                    result.total_time_ms,
                );
                total_input_tokens += usage.prompt_tokens;
                total_output_tokens += usage.output_tokens;
                total_cost += cost;
            } else {
                // Structured cost missing: try to dig it out of the raw
                // response JSON (`usage.cost`).
                let cost_from_raw = result
                    .exchange
                    .response
                    .get("usage")
                    .and_then(|u| u.get("cost"))
                    .and_then(|c| c.as_f64());
                if let Some(cost) = cost_from_raw {
                    println!("{}", format!("Layer cost (from raw): ${:.5} (Input: {} tokens, Output: {} tokens) | Time: API {}ms, Tools {}ms, Total {}ms",
                        cost, usage.prompt_tokens, usage.output_tokens,
                        result.api_time_ms, result.tool_time_ms, result.total_time_ms).bright_magenta());
                    session.add_layer_stats_with_time(
                        layer_name,
                        &layer.config().get_effective_model(&session.info.model),
                        usage.prompt_tokens,
                        usage.output_tokens,
                        cost,
                        result.api_time_ms,
                        result.tool_time_ms,
                        result.total_time_ms,
                    );
                    total_input_tokens += usage.prompt_tokens;
                    total_output_tokens += usage.output_tokens;
                    total_cost += cost;
                } else {
                    // No cost anywhere: still record the token counts (with a
                    // zero cost) so session statistics stay complete.
                    println!(
                        "{} {}",
                        "ERROR: Layer".bright_red(),
                        layer_name.bright_yellow()
                    );
                    println!("{}", "OpenRouter did not provide cost data. Make sure usage.include=true is set!".bright_red());
                    total_input_tokens += usage.prompt_tokens;
                    total_output_tokens += usage.output_tokens;
                    session.add_layer_stats_with_time(
                        layer_name,
                        &layer.config().get_effective_model(&session.info.model),
                        usage.prompt_tokens,
                        usage.output_tokens,
                        0.0,
                        result.api_time_ms,
                        result.tool_time_ms,
                        result.total_time_ms,
                    );
                }
            }
        } else {
            println!(
                "{} {} | Time: API {}ms, Tools {}ms, Total {}ms",
                "ERROR: No usage data for layer".bright_red(),
                layer_name.bright_yellow(),
                result.api_time_ms,
                result.tool_time_ms,
                result.total_time_ms
            );
        }
        // Apply the layer's output mode to the session message history.
        use crate::session::layers::OutputMode;
        match layer.config().output_mode {
            OutputMode::None => {
                println!("{}", "Output mode: none (intermediate layer)".bright_cyan());
            }
            OutputMode::Append => {
                println!(
                    "{}",
                    "Output mode: append (adding all layer outputs)".bright_cyan()
                );
                for output_text in &result.outputs {
                    session.add_message(layer.config().output_role.as_str(), output_text);
                }
            }
            OutputMode::Replace => {
                println!(
                    "{}",
                    "Output mode: replace (replacing with all layer outputs)".bright_cyan()
                );
                // Preserve any system message, rebuild the role's initial
                // messages, then append every layer output.
                let system_message = session
                    .messages
                    .iter()
                    .find(|m| m.role == "system")
                    .cloned();
                session.messages.clear();
                let mut final_messages = Vec::new();
                if let Some(sys_msg) = system_message {
                    final_messages.push(sys_msg);
                }
                let current_dir = std::env::current_dir().unwrap_or_default();
                if let Ok(initial_messages) =
                    crate::session::chat::session::get_initial_messages(
                        config,
                        role,
                        &current_dir,
                    )
                    .await
                {
                    final_messages.extend(initial_messages);
                }
                for output_text in &result.outputs {
                    let output_msg = crate::session::Message {
                        role: layer.config().output_role.as_str().to_string(),
                        content: output_text.clone(),
                        timestamp: std::time::SystemTime::now()
                            .duration_since(std::time::UNIX_EPOCH)
                            .unwrap_or_default()
                            .as_secs(),
                        cached: false,
                        ..Default::default()
                    };
                    final_messages.push(output_msg);
                }
                session.messages = final_messages;
            }
            OutputMode::Last => {
                println!(
                    "{}",
                    "Output mode: last (adding last layer output)".bright_cyan()
                );
                let last_message = result.outputs.last().unwrap_or(&String::new()).clone();
                session.add_message(layer.config().output_role.as_str(), &last_message);
            }
            OutputMode::Restart => {
                // Fixed: this banner previously (and wrongly) claimed mode "last".
                println!(
                    "{}",
                    "Output mode: restart (replacing with last layer output)".bright_cyan()
                );
                session.messages.clear();
                let last_message = result.outputs.last().unwrap_or(&String::new()).clone();
                session.add_message(layer.config().output_role.as_str(), &last_message);
            }
        };
        // Feed the last output — with context blocks expanded — into the
        // next layer's input.
        let last_output = result.outputs.last().unwrap_or(&String::new()).clone();
        current_input = if has_context_blocks(&last_output) {
            crate::log_debug!(
                "Expanding context blocks in layer {} output before passing to next layer",
                layer_name
            );
            expand_context_blocks(&last_output)
        } else {
            last_output
        };
    }
    let total_api_time_ms = session.info.total_api_time_ms;
    let total_tool_time_ms = session.info.total_tool_time_ms;
    let total_layer_time_ms = session.info.total_layer_time_ms;
    // Wipe the in-place TTY progress indicator before the summary line.
    if std::io::stdin().is_terminal() {
        print!("\r{}\r", " ".repeat(80));
    }
    println!(
        "\n{} | {} tokens | ${:.5} | {}ms",
        "Processing completed".bright_green(),
        total_input_tokens + total_output_tokens,
        total_cost,
        total_api_time_ms + total_tool_time_ms + total_layer_time_ms
    );
    Ok(current_input)
}
/// Prints each non-empty output produced by a layer, expanding any embedded
/// context blocks first; multi-output layers get per-output headers.
fn display_layer_outputs(&self, outputs: &[String], layer_name: &str) {
    let multiple = outputs.len() > 1;
    for (idx, raw) in outputs.iter().enumerate() {
        let ordinal = idx + 1;
        if multiple {
            println!("--- Output {} ---", ordinal);
        }
        // Whitespace-only outputs are header-only: nothing to render.
        if raw.trim().is_empty() {
            continue;
        }
        let rendered = if has_context_blocks(raw) {
            crate::log_debug!(
                "Context blocks detected in layer {} output, expanding...",
                layer_name
            );
            expand_context_blocks(raw)
        } else {
            raw.clone()
        };
        self.display_formatted_assistant_output(&rendered, layer_name, ordinal);
    }
}
/// Renders one layer output framed by a titled separator line, the content
/// itself (smart-truncated), and a completion footer; flushes stdout so the
/// frame appears immediately.
fn display_formatted_assistant_output(
    &self,
    output: &str,
    layer_name: &str,
    output_index: usize,
) {
    use colored::Colorize;
    use std::io::Write;
    // Header: "──<title><padding>──", padded out to at least 70 characters.
    let title = format!(" Assistant Response | {} ", layer_name);
    let total_width = std::cmp::max(70, title.len() + 4);
    let padding = "─".repeat(total_width - title.len());
    let header = format!("──{}{}──", title.bright_cyan(), padding.dimmed());
    println!("{}", header);
    self.display_assistant_content_smart(output);
    let footer = format!("✓ Layer '{}' output {} completed", layer_name, output_index);
    println!("{}", footer.bright_green());
    println!("──────────────────");
    std::io::stdout().flush().ok();
}
/// Prints assistant content verbatim when short, otherwise truncates either
/// by line count (keeping the first 40 of >50 lines) or by character count
/// (keeping 4997 of >5000 chars) and appends an elision marker.
fn display_assistant_content_smart(&self, content: &str) {
    const MAX_LINES: usize = 50;
    const SHOWN_LINES: usize = 40;
    const MAX_CHARS: usize = 5000;
    let line_count = content.lines().count();
    let char_count = content.chars().count();
    if line_count <= MAX_LINES && char_count <= MAX_CHARS {
        // Short enough to show in full.
        println!("{}", content);
        return;
    }
    if line_count > MAX_LINES {
        // Too many lines: show a prefix plus a "+N more lines" marker.
        for line in content.lines().take(SHOWN_LINES) {
            println!("{}", line);
        }
        println!(
            "{}",
            format!("... [+{} more lines]", line_count.saturating_sub(SHOWN_LINES)).bright_black()
        );
    } else {
        // Few lines but very long: cut by character count instead
        // (MAX_CHARS - 3 leaves room for the "..." suffix).
        let prefix: String = content.chars().take(MAX_CHARS - 3).collect();
        println!("{}...", prefix);
        println!("{}", "[Content truncated for display]".bright_black());
    }
}
}