pub mod cache;
pub mod cancellation; pub mod chat; mod chat_helper; pub mod context; pub mod helper_functions; pub mod history; pub mod image; pub mod layers; pub mod logger; pub mod modal; mod model_utils; pub mod output; mod project_context;
pub mod video; pub mod background_jobs;
pub mod inbox; pub mod inject_listener; pub mod pipelines; pub mod report; pub mod smart_summarizer; mod token_counter; pub mod webhook_listener; pub mod workflows;
pub use crate::providers::{
AiProvider, ProviderExchange, ProviderFactory, ProviderResponse, TokenUsage,
};
pub use background_jobs::{BackgroundJobManager, CompletedJob};
pub use cache::{CacheManager, CacheStatistics};
pub use helper_functions::summarize_context;
pub use layers::{InputMode, Layer, LayerConfig, LayerMcpConfig, LayerResult};
pub use model_utils::model_supports_caching;
pub use output::{
detect_output_mode, JsonlSink, OutputMode, OutputSink, SilentSink, WebSocketSink,
};
pub use project_context::ProjectContext;
pub use smart_summarizer::SmartSummarizer;
pub use token_counter::{
calculate_minimum_session_tokens, estimate_full_context_tokens, estimate_message_tokens,
estimate_session_tokens, estimate_tokens, truncate_to_tokens, validate_session_token_threshold,
};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Message {
    /// Conversation role: "system", "user", "assistant", or "tool".
    pub role: String,
    /// Text body of the message.
    pub content: String,
    /// Unix timestamp (seconds) at which the message was created.
    pub timestamp: u64,
    /// Cache-checkpoint marker; legacy records without the field load as `false`.
    #[serde(default = "default_cache_marker")]
    pub cached: bool,
    /// Optional provider-specific cache time-to-live hint.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cache_ttl: Option<String>,
    /// For "tool" messages: id of the tool call this message answers.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_call_id: Option<String>,
    /// Optional tool/function name.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// Raw tool-call payload attached to assistant messages.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<serde_json::Value>,
    /// Image attachments, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub images: Option<Vec<crate::session::image::ImageAttachment>>,
    /// Video attachments, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub videos: Option<Vec<crate::session::video::VideoAttachment>>,
    /// Provider "thinking"/reasoning payload, if any.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub thinking: Option<serde_json::Value>,
    /// Optional provider-assigned message id.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
/// Serde default for [`Message::cached`]: records written before the field
/// existed deserialize as uncached.
fn default_cache_marker() -> bool {
    false
}
/// Current Unix time in seconds, via the project-wide clock helper.
/// Also used as the serde default for `SessionInfo::last_cache_checkpoint_time`.
fn current_timestamp() -> u64 {
    crate::utils::time::now_secs()
}
impl Default for Message {
fn default() -> Self {
Self {
role: String::new(),
content: String::new(),
timestamp: current_timestamp(),
cached: false,
cache_ttl: None,
tool_call_id: None,
name: None,
tool_calls: None,
images: None,
videos: None,
thinking: None,
id: None,
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct SessionInfo {
    /// Human-readable session name.
    pub name: String,
    /// Unix timestamp (seconds) when the session was created.
    pub created_at: u64,
    /// Model identifier, e.g. "openrouter:anthropic/claude-sonnet-4".
    pub model: String,
    /// Provider identifier, e.g. "openrouter".
    pub provider: String,
    // --- cumulative usage counters ---
    pub input_tokens: u64,
    pub output_tokens: u64,
    pub cache_read_tokens: u64,
    pub cache_write_tokens: u64,
    #[serde(default)]
    pub reasoning_tokens: u64,
    pub total_cost: f64,
    pub duration_seconds: u64,
    /// Per-layer usage entries (see [`LayerStats`]).
    pub layer_stats: Vec<LayerStats>,
    #[serde(default)]
    pub tool_calls: u64,
    #[serde(default)]
    pub total_api_time_ms: u64,
    #[serde(default)]
    pub total_tool_time_ms: u64,
    #[serde(default)]
    pub total_layer_time_ms: u64,
    #[serde(default)]
    pub compression_stats: CompressionStats,
    #[serde(default)]
    pub total_api_calls: usize,
    // --- cache-checkpoint bookkeeping ---
    /// Tokens accumulated since the last cache checkpoint.
    #[serde(default)]
    pub current_non_cached_tokens: u64,
    #[serde(default)]
    pub current_total_tokens: u64,
    /// Defaults to "now" when absent so old logs still get a sane value.
    #[serde(default = "current_timestamp")]
    pub last_cache_checkpoint_time: u64,
    #[serde(default)]
    pub cache_next_user_message: bool,
    #[serde(default)]
    pub spending_threshold_checkpoint: f64,
    // --- compression-hint bookkeeping ---
    #[serde(default)]
    pub compression_hint_count: usize,
    #[serde(default)]
    pub last_compression_hint_shown: u64,
    #[serde(default)]
    pub context_tokens_after_last_compression: usize,
    #[serde(default)]
    pub predicted_turns_at_last_compression: f64,
    #[serde(default)]
    pub api_calls_at_last_compression: usize,
    #[serde(default)]
    pub output_tokens_at_last_compression: u64,
    #[serde(default)]
    pub consecutive_compressions: u32,
}
/// The scope at which a context compression was performed; each variant maps
/// to one counter in [`CompressionStats`].
#[derive(Debug, Clone)]
pub enum CompressionKind {
    Task,
    Phase,
    Project,
    Conversation,
}
/// Cumulative counters for context-compression activity in a session.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CompressionStats {
    /// Number of compressions performed, broken down by `CompressionKind`.
    pub task_compressions: usize,
    pub phase_compressions: usize,
    pub project_compressions: usize,
    pub conversation_compressions: usize,
    /// Total messages dropped across all compressions.
    pub total_messages_removed: usize,
    /// Total estimated tokens reclaimed across all compressions.
    pub total_tokens_saved: u64,
}
impl CompressionStats {
    /// Records one compression event of the given kind, accumulating the
    /// number of messages removed and the tokens saved.
    ///
    /// The running totals use saturating arithmetic so a long-lived session
    /// can never overflow (which would panic in debug builds).
    pub fn add_compression(&mut self, kind: CompressionKind, messages: usize, tokens: u64) {
        match kind {
            CompressionKind::Task => self.task_compressions += 1,
            CompressionKind::Phase => self.phase_compressions += 1,
            CompressionKind::Project => self.project_compressions += 1,
            CompressionKind::Conversation => self.conversation_compressions += 1,
        }
        self.total_messages_removed = self.total_messages_removed.saturating_add(messages);
        self.total_tokens_saved = self.total_tokens_saved.saturating_add(tokens);
    }

    /// Total number of compressions across all kinds.
    pub fn total_compressions(&self) -> usize {
        self.task_compressions
            + self.phase_compressions
            + self.project_compressions
            + self.conversation_compressions
    }

    /// Effectiveness score in `[0, 1)`: tokens saved divided by tokens saved
    /// plus a fixed smoothing constant; `0.0` when nothing was compressed.
    ///
    /// NOTE(review): 10000.0 looks like a smoothing baseline so the ratio only
    /// approaches 1 for large savings — confirm the intent before changing it.
    pub fn avg_compression_ratio(&self) -> f64 {
        if self.total_compressions() == 0 {
            0.0
        } else {
            self.total_tokens_saved as f64 / (self.total_tokens_saved as f64 + 10000.0)
        }
    }
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LayerStats {
    /// Which processing layer produced these stats (e.g. "main").
    pub layer_type: String,
    /// Model used by the layer.
    pub model: String,
    pub input_tokens: u64,
    pub output_tokens: u64,
    pub cost: f64,
    /// Unix timestamp (seconds) when the entry was recorded.
    pub timestamp: u64,
    /// Timing fields default to zero for records written before they existed.
    #[serde(default)]
    pub api_time_ms: u64,
    #[serde(default)]
    pub tool_time_ms: u64,
    #[serde(default)]
    pub total_time_ms: u64,
}
/// A chat session: aggregate statistics plus the full message history and an
/// optional backing log file.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Session {
    /// Aggregate metadata and usage statistics.
    pub info: SessionInfo,
    /// Ordered conversation history.
    pub messages: Vec<Message>,
    /// JSONL log file backing this session; `None` until one is assigned.
    pub session_file: Option<PathBuf>,
}
impl Session {
pub fn new(name: String, model: String, provider: String) -> Self {
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
Self {
info: SessionInfo {
name,
created_at: timestamp,
model,
provider,
input_tokens: 0,
output_tokens: 0,
cache_read_tokens: 0,
cache_write_tokens: 0,
reasoning_tokens: 0,
total_cost: 0.0,
duration_seconds: 0,
layer_stats: Vec::new(), tool_calls: 0, total_api_time_ms: 0,
total_tool_time_ms: 0,
total_layer_time_ms: 0,
compression_stats: CompressionStats::default(),
total_api_calls: 0,
current_non_cached_tokens: 0,
current_total_tokens: 0,
last_cache_checkpoint_time: timestamp,
cache_next_user_message: false,
spending_threshold_checkpoint: 0.0,
compression_hint_count: 0,
last_compression_hint_shown: 0,
context_tokens_after_last_compression: 0,
predicted_turns_at_last_compression: 0.0,
api_calls_at_last_compression: 0,
output_tokens_at_last_compression: 0,
consecutive_compressions: 0,
},
messages: Vec::new(),
session_file: None,
}
}
pub fn add_message(&mut self, role: &str, content: &str) -> Message {
let message = Message {
role: role.to_string(),
content: content.to_string(),
timestamp: SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs(),
cached: false,
..Default::default()
};
self.messages.push(message.clone());
message
}
pub fn add_cache_checkpoint(&mut self, system: bool) -> Result<bool, anyhow::Error> {
if system {
for msg in self.messages.iter_mut() {
if msg.role == "system" {
msg.cached = crate::session::model_supports_caching(&self.info.model);
if msg.cached {
self.info.current_non_cached_tokens = 0;
self.info.current_total_tokens = 0;
return Ok(true);
}
return Ok(false);
}
}
Ok(false)
} else {
Err(anyhow::anyhow!(
"Use CacheManager for content cache markers instead of add_cache_checkpoint"
))
}
}
pub fn add_layer_stats(
&mut self,
layer_type: &str,
model: &str,
input_tokens: u64,
output_tokens: u64,
cost: f64,
) {
self.add_layer_stats_with_time(
layer_type,
model,
input_tokens,
output_tokens,
cost,
0,
0,
0,
);
}
#[allow(clippy::too_many_arguments)]
pub fn add_layer_stats_with_time(
&mut self,
layer_type: &str,
model: &str,
input_tokens: u64,
output_tokens: u64,
cost: f64,
api_time_ms: u64,
tool_time_ms: u64,
total_time_ms: u64,
) {
let stats = LayerStats {
layer_type: layer_type.to_string(),
model: model.to_string(),
input_tokens,
output_tokens,
cost,
timestamp: SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs(),
api_time_ms,
tool_time_ms,
total_time_ms,
};
self.info.layer_stats.push(stats);
self.info.input_tokens += input_tokens;
self.info.output_tokens += output_tokens;
self.info.total_cost += cost;
self.info.total_api_time_ms += api_time_ms;
self.info.total_tool_time_ms += tool_time_ms;
self.info.total_layer_time_ms += total_time_ms;
}
pub fn save(&self) -> Result<(), anyhow::Error> {
if let Some(session_file) = &self.session_file {
let summary_entry = serde_json::json!({
"type": "SUMMARY",
"timestamp": std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs(),
"session_info": &self.info
});
append_to_session_file(session_file, &serde_json::to_string(&summary_entry)?)?;
Ok(())
} else {
Err(anyhow::anyhow!("No session file specified"))
}
}
}
pub mod persistence;
pub use persistence::{
append_to_session_file, clean_interrupted_tool_calls, extract_runtime_state_from_log,
find_most_recent_session_for_project, get_sessions_dir, list_available_sessions, load_session,
SessionRuntimeState,
};
pub mod prompt;
pub use prompt::{add_compression_hints_to_prompt, create_system_prompt};
pub mod completion;
pub use completion::{
chat_completion_with_provider, chat_completion_with_validation, ChatCompletionProviderParams,
ChatCompletionWithValidationParams,
};
#[cfg(test)]
mod tests {
use super::*;
use crate::session::persistence::has_incomplete_tool_calls;
use serde_json::json;
fn create_test_message(
role: &str,
content: &str,
tool_calls: Option<serde_json::Value>,
tool_call_id: Option<String>,
) -> Message {
Message {
role: role.to_string(),
content: content.to_string(),
timestamp: 1234567890,
cached: false,
cache_ttl: None,
tool_call_id,
name: None,
tool_calls,
images: None,
videos: None,
thinking: None,
id: None,
}
}
#[test]
fn test_has_incomplete_tool_calls_complete_sequence() {
let messages = vec![
create_test_message("user", "List files", None, None),
create_test_message(
"assistant",
"I'll list the files for you.",
Some(
json!([{"id": "call_123", "name": "list_files", "arguments": {"directory": "."}}]),
),
None,
),
create_test_message(
"tool",
"file1.txt\nfile2.txt",
None,
Some("call_123".to_string()),
),
create_test_message(
"assistant",
"Here are the files in the directory.",
None,
None,
),
];
assert!(!has_incomplete_tool_calls(&messages));
}
#[test]
fn test_has_incomplete_tool_calls_incomplete_sequence() {
let messages = vec![
create_test_message("user", "List files", None, None),
create_test_message(
"assistant",
"I'll list the files for you.",
Some(
json!([{"id": "call_123", "name": "list_files", "arguments": {"directory": "."}}]),
),
None,
),
];
assert!(has_incomplete_tool_calls(&messages));
}
#[test]
fn test_has_incomplete_tool_calls_multiple_calls_partial() {
let messages = vec![
create_test_message("user", "Do multiple things", None, None),
create_test_message(
"assistant",
"I'll do multiple things.",
Some(json!([
{"id": "call_123", "name": "list_files", "arguments": {"directory": "."}},
{"id": "call_456", "name": "shell", "arguments": {"command": "pwd"}}
])),
None,
),
create_test_message(
"tool",
"file1.txt\nfile2.txt",
None,
Some("call_123".to_string()),
),
];
assert!(has_incomplete_tool_calls(&messages));
}
#[test]
fn test_has_incomplete_tool_calls_no_tool_calls() {
let messages = vec![
create_test_message("user", "Hello", None, None),
create_test_message("assistant", "Hello! How can I help you?", None, None),
];
assert!(!has_incomplete_tool_calls(&messages));
}
#[test]
fn test_clean_interrupted_tool_calls_preserves_complete() {
let mut messages = vec![
create_test_message("user", "List files", None, None),
create_test_message(
"assistant",
"I'll list the files for you.",
Some(
json!([{"id": "call_123", "name": "list_files", "arguments": {"directory": "."}}]),
),
None,
),
create_test_message(
"tool",
"file1.txt\nfile2.txt",
None,
Some("call_123".to_string()),
),
create_test_message("assistant", "Here are the files.", None, None),
];
let original_count = messages.len();
let cleaned = clean_interrupted_tool_calls(&mut messages, "Test");
assert!(!cleaned);
assert_eq!(messages.len(), original_count);
}
#[test]
fn test_clean_interrupted_tool_calls_inserts_synthetic_result() {
let mut messages = vec![
create_test_message("user", "List files", None, None),
create_test_message(
"assistant",
"I'll list the files for you.",
Some(
json!([{"id": "call_123", "function": {"name": "list_files", "arguments": "{\"directory\": \".\"}"}}]),
),
None,
),
];
let cleaned = clean_interrupted_tool_calls(&mut messages, "Test");
assert!(cleaned);
assert_eq!(messages.len(), 3); assert_eq!(messages[0].role, "user");
assert_eq!(messages[1].role, "assistant");
assert_eq!(messages[2].role, "tool");
assert_eq!(messages[2].tool_call_id.as_deref(), Some("call_123"));
assert!(messages[2].content.contains("interrupted"));
}
#[test]
fn test_session_loading_preserves_stats_from_summary() {
use std::io::Write;
use tempfile::NamedTempFile;
let mut temp_file = NamedTempFile::new().expect("Failed to create temp file");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"type": "SUMMARY",
"timestamp": 1000,
"session_info": {
"name": "test-session",
"created_at": 1000,
"model": "openrouter:anthropic/claude-sonnet-4",
"provider": "openrouter",
"input_tokens": 100,
"output_tokens": 50,
"cache_read_tokens": 20,
"cache_write_tokens": 5,
"total_cost": 0.001,
"duration_seconds": 10,
"layer_stats": [
{
"layer_type": "main",
"model": "openrouter:anthropic/claude-sonnet-4",
"input_tokens": 100,
"output_tokens": 50,
"cost": 0.001,
"timestamp": 1000,
"api_time_ms": 500,
"tool_time_ms": 100,
"total_time_ms": 600
}
],
"tool_calls": 5,
"total_api_time_ms": 500,
"total_tool_time_ms": 100,
"total_layer_time_ms": 600,
"compression_stats": {
"task_compressions": 0,
"phase_compressions": 0,
"project_compressions": 0,
"conversation_compressions": 0,
"total_messages_removed": 0,
"total_tokens_saved": 0
},
"total_api_calls": 1,
"current_non_cached_tokens": 0,
"current_total_tokens": 0,
"last_cache_checkpoint_time": 1000,
"cache_next_user_message": false,
"spending_threshold_checkpoint": 0.0,
"compression_hint_count": 0,
"last_compression_hint_shown": 0
}
}))
.unwrap()
)
.expect("Failed to write SUMMARY");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"type": "STATS",
"timestamp": 900, "total_cost": 0.0,
"input_tokens": 0,
"output_tokens": 0,
"cache_read_tokens": 0,
"cache_write_tokens": 0,
"tool_calls": 0,
"total_api_time_ms": 0,
"total_tool_time_ms": 0,
"total_layer_time_ms": 0,
"model": "openrouter:anthropic/claude-sonnet-4",
"provider": "openrouter"
}))
.unwrap()
)
.expect("Failed to write old STATS");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"role": "user",
"content": "Hello",
"timestamp": 1100,
"cached": false
}))
.unwrap()
)
.expect("Failed to write message");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"type": "SUMMARY",
"timestamp": 2000, "session_info": {
"name": "test-session",
"created_at": 1000,
"model": "openrouter:anthropic/claude-sonnet-4",
"provider": "openrouter",
"input_tokens": 200, "output_tokens": 100,
"cache_read_tokens": 40,
"cache_write_tokens": 10,
"total_cost": 0.002,
"duration_seconds": 20,
"layer_stats": [
{
"layer_type": "main",
"model": "openrouter:anthropic/claude-sonnet-4",
"input_tokens": 200,
"output_tokens": 100,
"cost": 0.002,
"timestamp": 2000,
"api_time_ms": 1000,
"tool_time_ms": 200,
"total_time_ms": 1200
}
],
"tool_calls": 10,
"total_api_time_ms": 1000,
"total_tool_time_ms": 200,
"total_layer_time_ms": 1200,
"compression_stats": {
"task_compressions": 0,
"phase_compressions": 0,
"project_compressions": 0,
"conversation_compressions": 0,
"total_messages_removed": 0,
"total_tokens_saved": 0
},
"total_api_calls": 2,
"current_non_cached_tokens": 0,
"current_total_tokens": 0,
"last_cache_checkpoint_time": 2000,
"cache_next_user_message": false,
"spending_threshold_checkpoint": 0.0,
"compression_hint_count": 0,
"last_compression_hint_shown": 0
}
}))
.unwrap()
)
.expect("Failed to write final SUMMARY");
temp_file.flush().expect("Failed to flush temp file");
let session =
load_session(&temp_file.path().to_path_buf()).expect("Failed to load session");
assert_eq!(
session.info.input_tokens, 200,
"Input tokens should be from final SUMMARY"
);
assert_eq!(
session.info.output_tokens, 100,
"Output tokens should be from final SUMMARY"
);
assert_eq!(
session.info.cache_read_tokens, 40,
"Cache read tokens should be from final SUMMARY"
);
assert_eq!(
session.info.total_cost, 0.002,
"Total cost should be from final SUMMARY"
);
assert_eq!(
session.info.tool_calls, 10,
"Tool calls should be from final SUMMARY"
);
assert_eq!(
session.info.total_api_time_ms, 1000,
"API time should be from final SUMMARY"
);
assert_eq!(
session.info.total_tool_time_ms, 200,
"Tool time should be from final SUMMARY"
);
assert_eq!(
session.info.total_layer_time_ms, 1200,
"Layer time should be from final SUMMARY"
);
assert_eq!(
session.info.layer_stats.len(),
1,
"Layer stats should be preserved"
);
assert_eq!(
session.info.layer_stats[0].input_tokens, 200,
"Layer stats should match final SUMMARY"
);
assert_eq!(
session.info.layer_stats[0].output_tokens, 100,
"Layer stats should match final SUMMARY"
);
assert_eq!(
session.info.layer_stats[0].cost, 0.002,
"Layer stats cost should match final SUMMARY"
);
assert_eq!(session.messages.len(), 1, "Should have 1 message");
assert_eq!(
session.messages[0].role, "user",
"Message should be user message"
);
assert_eq!(
session.info.model, "openrouter:anthropic/claude-sonnet-4",
"Model should be from SUMMARY"
);
}
#[test]
fn test_session_loading_restores_model_from_command() {
use std::io::Write;
use tempfile::NamedTempFile;
let mut temp_file = NamedTempFile::new().expect("Failed to create temp file");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"type": "SUMMARY",
"timestamp": 1000,
"session_info": {
"name": "test-session",
"created_at": 1000,
"model": "openrouter:anthropic/claude-sonnet-4",
"provider": "openrouter",
"input_tokens": 100,
"output_tokens": 50,
"cache_read_tokens": 20,
"cache_write_tokens": 5,
"total_cost": 0.001,
"duration_seconds": 10,
"layer_stats": [],
"tool_calls": 5,
"total_api_time_ms": 500,
"total_tool_time_ms": 100,
"total_layer_time_ms": 600,
"compression_stats": {
"task_compressions": 0,
"phase_compressions": 0,
"project_compressions": 0,
"conversation_compressions": 0,
"total_messages_removed": 0,
"total_tokens_saved": 0
},
"total_api_calls": 1,
"current_non_cached_tokens": 0,
"current_total_tokens": 0,
"last_cache_checkpoint_time": 1000,
"cache_next_user_message": false,
"spending_threshold_checkpoint": 0.0,
"compression_hint_count": 0,
"last_compression_hint_shown": 0
}
}))
.unwrap()
)
.expect("Failed to write SUMMARY");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"type": "COMMAND",
"timestamp": 1500,
"command": "/model openrouter:openai/gpt-4o"
}))
.unwrap()
)
.expect("Failed to write COMMAND");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"role": "user",
"content": "Hello with new model",
"timestamp": 1600,
"cached": false
}))
.unwrap()
)
.expect("Failed to write message");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"type": "SUMMARY",
"timestamp": 2000,
"session_info": {
"name": "test-session",
"created_at": 1000,
"model": "openrouter:openai/gpt-4o",
"provider": "openrouter",
"input_tokens": 200,
"output_tokens": 100,
"cache_read_tokens": 40,
"cache_write_tokens": 10,
"total_cost": 0.002,
"duration_seconds": 20,
"layer_stats": [],
"tool_calls": 10,
"total_api_time_ms": 1000,
"total_tool_time_ms": 200,
"total_layer_time_ms": 1200,
"compression_stats": {
"task_compressions": 0,
"phase_compressions": 0,
"project_compressions": 0,
"conversation_compressions": 0,
"total_messages_removed": 0,
"total_tokens_saved": 0
},
"total_api_calls": 2,
"current_non_cached_tokens": 0,
"current_total_tokens": 0,
"last_cache_checkpoint_time": 2000,
"cache_next_user_message": false,
"spending_threshold_checkpoint": 0.0,
"compression_hint_count": 0,
"last_compression_hint_shown": 0
}
}))
.unwrap()
)
.expect("Failed to write final SUMMARY");
temp_file.flush().expect("Failed to flush temp file");
let session =
load_session(&temp_file.path().to_path_buf()).expect("Failed to load session");
assert_eq!(
session.info.model, "openrouter:openai/gpt-4o",
"Model should be restored from /model command and final SUMMARY"
);
assert_eq!(session.info.input_tokens, 200);
assert_eq!(session.info.total_cost, 0.002);
}
#[test]
fn test_session_loading_model_without_command() {
use std::io::Write;
use tempfile::NamedTempFile;
let mut temp_file = NamedTempFile::new().expect("Failed to create temp file");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"type": "SUMMARY",
"timestamp": 1000,
"session_info": {
"name": "test-session",
"created_at": 1000,
"model": "openrouter:google/gemini-2.0-flash-exp:free",
"provider": "openrouter",
"input_tokens": 100,
"output_tokens": 50,
"cache_read_tokens": 20,
"cache_write_tokens": 5,
"total_cost": 0.001,
"duration_seconds": 10,
"layer_stats": [],
"tool_calls": 5,
"total_api_time_ms": 500,
"total_tool_time_ms": 100,
"total_layer_time_ms": 600,
"compression_stats": {
"task_compressions": 0,
"phase_compressions": 0,
"project_compressions": 0,
"conversation_compressions": 0,
"total_messages_removed": 0,
"total_tokens_saved": 0
},
"total_api_calls": 1,
"current_non_cached_tokens": 0,
"current_total_tokens": 0,
"last_cache_checkpoint_time": 1000,
"cache_next_user_message": false,
"spending_threshold_checkpoint": 0.0,
"compression_hint_count": 0,
"last_compression_hint_shown": 0
}
}))
.unwrap()
)
.expect("Failed to write SUMMARY");
writeln!(
temp_file,
"{}",
serde_json::to_string(&json!({
"role": "user",
"content": "Hello",
"timestamp": 1100,
"cached": false
}))
.unwrap()
)
.expect("Failed to write message");
temp_file.flush().expect("Failed to flush temp file");
let session =
load_session(&temp_file.path().to_path_buf()).expect("Failed to load session");
assert_eq!(
session.info.model, "openrouter:google/gemini-2.0-flash-exp:free",
"Model should be restored from SUMMARY when no /model command exists"
);
}
}