use crate::config::Config;
use crate::log_info;
use crate::session::chat::assistant_output::print_assistant_response;
use crate::session::chat::continuation::constants::CONTINUATION_USER_MESSAGE_TEMPLATE;
use crate::session::chat::continuation::detection::{
should_trigger_continuation, ContinuationParams,
};
use crate::session::chat::continuation::file_context::{
collect_user_request_history, generate_file_context_content, parse_file_contexts,
};
use crate::session::chat::continuation::injection::inject_summary_request;
use crate::session::chat::session::ChatSession;
use anyhow::Result;
use colored::Colorize;
use std::sync::atomic::Ordering;
/// Handles the AI's summary response after a session continuation was requested.
///
/// When the session hit its token limit, a summary request was injected and
/// `continuation_pending` was set on the session. This function consumes the
/// resulting summary: it prints it for the user, resets the session messages
/// (keeping the system prompt and re-seeding the role's initial messages),
/// re-inserts the summary as an assistant message, and appends a synthetic
/// user message asking the AI to continue — built from the preserved
/// user-request history and any file contexts parsed out of the summary.
///
/// # Arguments
/// * `chat_session` - mutable session whose message list is reset and re-seeded
/// * `response_content` - the AI's summary text
/// * `_has_tool_calls` - unused; kept for call-site compatibility
/// * `config` / `role` - forwarded to output rendering and initial-message setup
///
/// # Returns
/// `Ok(true)` when a pending continuation was processed (caller should re-run
/// the conversation loop), `Ok(false)` when no continuation was pending.
pub async fn process_continuation_response(
    chat_session: &mut ChatSession,
    response_content: &str,
    _has_tool_calls: bool,
    config: &Config,
    role: &str,
) -> Result<bool> {
    if !chat_session.continuation_pending {
        return Ok(false);
    }

    /// Seconds since the Unix epoch; 0 if the system clock predates 1970.
    fn unix_timestamp() -> u64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs()
    }

    // Show the summary to the user.
    println!(
        "{}",
        "📋 Session Summary (Token limit reached)"
            .bright_blue()
            .bold()
    );
    println!("{}", "─".repeat(50).dimmed());
    if response_content.trim().is_empty() {
        crate::log_debug!("WARNING: Empty response content in continuation summary");
        println!("{}", "(No summary provided by AI)".dimmed());
    } else {
        crate::log_debug!("Response content length: {} chars", response_content.len());
        print_assistant_response(response_content, config, role, &None);
    }
    println!();

    log_info!("Work summary received - resetting session for continuation...");

    // Reset the session: keep only the system message, then re-seed the
    // role's initial messages for the current working directory.
    let system_message = chat_session
        .session
        .messages
        .iter()
        .find(|msg| msg.role == "system")
        .cloned();
    chat_session.session.messages.clear();
    if let Some(system_msg) = system_message {
        chat_session.session.messages.push(system_msg);
    }
    let current_dir = std::env::current_dir().unwrap_or_default();
    // BUG FIX: was `¤t_dir` (HTML-entity mojibake of `&current_dir`
    // via `&curren;`) — restored the intended reference.
    if let Ok(initial_messages) =
        crate::session::chat::session::get_initial_messages(config, role, &current_dir).await
    {
        chat_session.session.messages.extend(initial_messages);
    }

    // Re-insert the summary so the model retains it as prior context.
    let summary_message = crate::session::Message {
        role: "assistant".to_string(),
        content: response_content.to_string(),
        timestamp: unix_timestamp(),
        cached: false,
        ..Default::default()
    };
    chat_session.session.messages.push(summary_message);

    // Build the synthetic "please continue" user message from the preserved
    // request history and any file contexts named in the summary.
    let user_history = collect_user_request_history(&chat_session.session.messages);
    let file_contexts = parse_file_contexts(response_content);
    let context_content = generate_file_context_content(&file_contexts);
    // BUG FIX: the template has two `{}` slots. `str::replace` substitutes
    // EVERY occurrence, so the first call consumed both slots and the
    // file-context slot was never filled. `replacen(.., 1)` fills one slot
    // at a time.
    let continuation_content = CONTINUATION_USER_MESSAGE_TEMPLATE
        .replacen("{}", &user_history, 1)
        .replacen("{}", &context_content, 1);
    let continue_message = crate::session::Message {
        role: "user".to_string(),
        content: continuation_content,
        timestamp: unix_timestamp(),
        cached: false,
        ..Default::default()
    };
    chat_session.session.messages.push(continue_message);

    // User-facing status about what was carried over.
    println!("{}", "🔄 Continuing Session".bright_green().bold());
    println!("{}", "─".repeat(50).dimmed());
    let task_count = user_history
        .lines()
        .filter(|line| !line.trim().is_empty())
        .count();
    if task_count > 0 {
        println!(
            "{} {}",
            "📋 Continuing with".dimmed(),
            format!("{} task(s)", task_count).bright_white()
        );
    }
    if !file_contexts.is_empty() {
        println!(
            "{} {}",
            "📁 Loaded context from".dimmed(),
            format!("{} file(s)", file_contexts.len()).bright_white()
        );
        for (filepath, start, end) in &file_contexts {
            println!(
                "  {} {}",
                "•".dimmed(),
                format!("{} (lines {}-{})", filepath, start, end).bright_cyan()
            );
        }
    }
    println!("{}", "🚀 Ready to continue...".bright_green());
    println!();

    chat_session.continuation_pending = false;

    // Log the same carry-over details for diagnostics.
    if !file_contexts.is_empty() {
        log_info!("Loaded context from {} file(s)", file_contexts.len());
        for (filepath, start, end) in &file_contexts {
            log_info!("  {} (lines {}-{})", filepath, start, end);
        }
    } else {
        log_info!("No file contexts found in AI summary - check format");
        crate::log_debug!("Summary content for context parsing: {}", response_content);
    }
    let history_lines = user_history.lines().count();
    if history_lines > 1 {
        log_info!(
            "Preserved {} user request(s) for continuation context",
            history_lines
        );
    }
    log_info!("Session context preserved - continuing automatically...");
    Ok(true)
}
/// Convenience wrapper around
/// [`check_and_handle_continuation_with_cancellation`] for call sites that
/// have no cancellation flag to propagate.
///
/// Returns `Ok(true)` when a summary request was injected, `Ok(false)` when
/// the session is still under its token threshold.
pub async fn check_and_handle_continuation(
    chat_session: &mut ChatSession,
    config: &Config,
) -> Result<bool> {
    // Delegate with no cancellation token.
    check_and_handle_continuation_with_cancellation(chat_session, config, None).await
}
/// Checks whether the session has crossed its token threshold and, if so,
/// injects a summary request so the next model turn produces a work summary.
///
/// # Arguments
/// * `chat_session` - session whose message list is measured and possibly
///   extended with the injected summary request
/// * `config` - used for tool discovery and continuation parameters
/// * `operation_cancelled` - optional cooperative-cancellation flag; checked
///   before any work and again after token estimation
///
/// # Returns
/// `Ok(true)` when a summary request was injected (a continuation is now
/// pending), `Ok(false)` otherwise.
///
/// # Errors
/// Returns an "Operation cancelled" error if the cancellation flag is set,
/// and propagates any error from `inject_summary_request`.
pub async fn check_and_handle_continuation_with_cancellation(
    chat_session: &mut ChatSession,
    config: &Config,
    operation_cancelled: Option<std::sync::Arc<std::sync::atomic::AtomicBool>>,
) -> Result<bool> {
    // Bail out early if the caller already cancelled the operation.
    if let Some(ref cancelled) = operation_cancelled {
        if cancelled.load(Ordering::SeqCst) {
            return Err(anyhow::anyhow!("Operation cancelled"));
        }
    }

    // Estimate the full context size, including tool definitions when any
    // are available (tool schemas count against the context window too).
    let current_tokens = {
        let tools = crate::mcp::get_available_functions(config).await;
        crate::session::estimate_full_context_tokens(
            &chat_session.session.messages,
            if tools.is_empty() { None } else { Some(&tools) },
        )
    };

    let mut params = ContinuationParams::new(chat_session, config, current_tokens);
    // BUG FIX: was `¶ms` (HTML-entity mojibake of `&params` via
    // `&para;`) — restored the intended reference.
    if should_trigger_continuation(&params) {
        // Re-check cancellation: tool discovery and token estimation above
        // may have taken a while.
        if let Some(ref cancelled) = operation_cancelled {
            if cancelled.load(Ordering::SeqCst) {
                return Err(anyhow::anyhow!("Operation cancelled"));
            }
        }
        inject_summary_request(&mut params)?;
        return Ok(true);
    }
    Ok(false)
}