use super::animation::show_smart_animation;
use crate::config::Config;
use crate::session::chat::session::ChatSession;
use anyhow::Result;
use colored::*;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
/// Processes `input` through the layered pipeline, showing a progress
/// animation while the layers run.
///
/// Before processing, the session's system message is marked for prompt
/// caching (once per session) when the current model supports it, to save
/// tokens on subsequent requests.
///
/// # Errors
///
/// Returns any error propagated from
/// `crate::session::layers::process_with_layers`. The animation task is
/// always stopped and awaited before returning, on both success and error
/// paths.
pub async fn process_layered_response(
    input: &str,
    chat_session: &mut ChatSession,
    config: &Config,
    role: &str,
    operation_cancelled: tokio::sync::watch::Receiver<bool>,
) -> Result<String> {
    // Has the system message already been marked for caching in this session?
    let system_message_cached = chat_session
        .session
        .messages
        .iter()
        .any(|msg| msg.role == "system" && msg.cached);

    if !system_message_cached {
        if let Ok(cached) = chat_session.session.add_cache_checkpoint(true) {
            // Only announce and persist when the checkpoint actually took
            // effect and the active model supports caching.
            if cached && crate::session::model_supports_caching(&chat_session.model) {
                println!(
                    "{}",
                    "System message has been automatically marked for caching to save tokens."
                        .yellow()
                );
                // Persisting the session is best-effort; a save failure must
                // not abort response processing.
                let _ = chat_session.save();
            }
        }
    }

    // Spawn the progress animation; it runs until the shared flag is set.
    let animation_cancel = Arc::new(AtomicBool::new(false));
    let animation_cancel_clone = animation_cancel.clone();
    let current_cost = chat_session.session.info.total_cost;
    let animation_task = tokio::spawn(async move {
        let _ = show_smart_animation(animation_cancel_clone, current_cost).await;
    });

    if config.get_log_level().is_debug_enabled() {
        println!("{}", "Using layered processing with model-specific caching - only supported models will use caching".bright_cyan());
    } else {
        println!("{}", "Using layered processing".bright_cyan());
    }

    // Capture the result instead of matching here, so the animation shutdown
    // below runs exactly once on both the success and error paths (the
    // original duplicated the store/await pair in the Err arm).
    // `operation_cancelled` is owned and unused afterwards, so it is moved
    // directly rather than cloned.
    let result = crate::session::layers::process_with_layers(
        input,
        &mut chat_session.session,
        config,
        role,
        operation_cancelled,
    )
    .await;

    // Signal the animation to stop and wait for the task to finish so its
    // terminal output cannot interleave with whatever the caller prints next.
    animation_cancel.store(true, Ordering::SeqCst);
    let _ = animation_task.await;

    result
}