use cortex_mem_core::{
CortexFilesystem, CortexMem, CortexMemBuilder, EmbeddingClient,
EmbeddingConfig, LLMClient, QdrantConfig, SessionManager, VectorSearchEngine,
};
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Shared application state; cloned into each consumer.
///
/// `Clone` is cheap for the `Arc`-wrapped fields (handle copies that share
/// the underlying resources); only `data_dir` (a `PathBuf`) is deep-copied.
#[derive(Clone)]
pub struct AppState {
    // Core memory engine built in `new`; currently unused directly (hence
    // `dead_code`) but kept alive as the owner of filesystem/session handles.
    #[allow(dead_code)]
    pub cortex: Arc<CortexMem>,
    // Filesystem handle obtained from `cortex.filesystem()`.
    pub filesystem: Arc<CortexFilesystem>,
    // Session manager obtained from `cortex.session_manager()`; RwLock'd for
    // runtime mutation.
    pub session_manager: Arc<tokio::sync::RwLock<SessionManager>>,
    // `None` when no LLM configuration was found (see `load_configs`).
    pub llm_client: Option<Arc<dyn LLMClient>>,
    // Vector store handle from `cortex.vector_store()`; kept but unused here.
    #[allow(dead_code)]
    pub vector_store: Option<Arc<dyn cortex_mem_core::vector_store::VectorStore>>,
    // `None` when embeddings are not configured.
    pub embedding_client: Option<Arc<EmbeddingClient>>,
    // Active search engine; replaced wholesale by `switch_tenant`, hence the
    // outer RwLock around the optional Arc.
    pub vector_engine: Arc<RwLock<Option<Arc<VectorSearchEngine>>>>,
    // Base data directory passed to `new`.
    pub data_dir: PathBuf,
    // Root directory of the currently selected tenant, if one is active.
    pub current_tenant_root: Arc<RwLock<Option<PathBuf>>>,
    // Id of the currently selected tenant, if one is active.
    pub current_tenant_id: Arc<RwLock<Option<String>>>,
}
impl AppState {
/// Build the application state rooted at `data_dir`.
///
/// Loads LLM / embedding / Qdrant configuration (config.toml first, then
/// environment variables), constructs the core `CortexMem` engine, and —
/// when both a vector store and an embedding client are available — opens a
/// dedicated Qdrant connection for the `VectorSearchEngine`.
///
/// # Errors
/// Fails when configuration loading or `CortexMemBuilder::build` fails.
/// A failed Qdrant connection for the search engine is logged and tolerated.
pub async fn new(data_dir: &str) -> anyhow::Result<Self> {
    let data_dir = PathBuf::from(data_dir);
    tracing::info!("Initializing Cortex Memory with unified automation...");
    let cortex_dir = data_dir.join("cortex");

    // Load configuration exactly once. The previous implementation called
    // load_configs() a second time below, repeating the file/env reads and
    // duplicating its log output.
    let (llm_client, embedding_config, qdrant_config) = Self::load_configs()?;

    // Open the search engine's Qdrant connection before the builder consumes
    // the config (QdrantConfig is not assumed to implement Clone). Only
    // attempted when embeddings are configured, since the engine is gated on
    // an embedding client below anyway.
    let qdrant_store = match (&qdrant_config, &embedding_config) {
        (Some(cfg), Some(_)) => match cortex_mem_core::QdrantVectorStore::new(cfg).await {
            Ok(store) => Some(Arc::new(store)),
            Err(e) => {
                // Previously this error was silently discarded.
                tracing::warn!("Failed to connect to Qdrant for vector search: {}", e);
                None
            }
        },
        _ => None,
    };

    let mut builder = CortexMemBuilder::new(&cortex_dir);
    if let Some(llm) = llm_client.clone() {
        builder = builder.with_llm(llm);
    }
    if let Some(emb_cfg) = embedding_config {
        builder = builder.with_embedding(emb_cfg);
    }
    if let Some(qdrant_cfg) = qdrant_config {
        builder = builder.with_qdrant(qdrant_cfg);
    }
    let cortex = builder.build().await?;
    tracing::info!("✅ Cortex Memory initialized with v2.5 MemoryEventCoordinator");

    let filesystem = cortex.filesystem();
    let session_manager = cortex.session_manager();
    let embedding_client = cortex.embedding();
    let vector_store = cortex.vector_store();

    // The search engine requires the core vector store, an embedding client
    // and the live Qdrant connection; the `with_llm` variant is used when an
    // LLM client is available.
    let vector_engine = match (&vector_store, &embedding_client, qdrant_store) {
        (Some(_), Some(ec), Some(qdrant_arc)) => Some(if let Some(llm) = &llm_client {
            Arc::new(VectorSearchEngine::with_llm(
                qdrant_arc,
                ec.clone(),
                filesystem.clone(),
                llm.clone(),
            ))
        } else {
            Arc::new(VectorSearchEngine::new(
                qdrant_arc,
                ec.clone(),
                filesystem.clone(),
            ))
        }),
        _ => None,
    };

    Ok(Self {
        cortex: Arc::new(cortex),
        filesystem,
        session_manager,
        llm_client,
        vector_store,
        embedding_client,
        vector_engine: Arc::new(RwLock::new(vector_engine)),
        data_dir,
        current_tenant_root: Arc::new(RwLock::new(None)),
        current_tenant_id: Arc::new(RwLock::new(None)),
    })
}
fn load_configs() -> anyhow::Result<(
Option<Arc<dyn LLMClient>>,
Option<EmbeddingConfig>,
Option<QdrantConfig>,
)> {
if let Ok(config) = cortex_mem_config::Config::load("config.toml") {
tracing::info!("Loaded configuration from config.toml");
let llm_client = {
let llm_config = cortex_mem_core::llm::client::LLMConfig {
api_base_url: config.llm.api_base_url.clone(),
api_key: config.llm.api_key.clone(),
model_efficient: config.llm.model_efficient.clone(),
temperature: 0.1,
max_tokens: 4096,
};
match cortex_mem_core::llm::LLMClientImpl::new(llm_config) {
Ok(client) => {
tracing::info!("LLM client initialized from config");
Some(Arc::new(client) as Arc<dyn LLMClient>)
}
Err(e) => {
tracing::warn!("Failed to initialize LLM client: {}", e);
None
}
}
};
let embedding_config = EmbeddingConfig {
api_base_url: config.embedding.api_base_url,
api_key: config.embedding.api_key,
model_name: config.embedding.model_name,
batch_size: config.embedding.batch_size,
timeout_secs: config.embedding.timeout_secs,
};
let qdrant_config = QdrantConfig {
url: config.qdrant.url,
collection_name: config.qdrant.collection_name,
embedding_dim: config.qdrant.embedding_dim,
timeout_secs: config.qdrant.timeout_secs,
api_key: config.qdrant.api_key.clone(),
tenant_id: None, };
Ok((llm_client, Some(embedding_config), Some(qdrant_config)))
} else {
tracing::info!("Loading configuration from environment variables");
let llm_client = if let (Ok(api_url), Ok(api_key), Ok(model)) = (
std::env::var("LLM_API_BASE_URL"),
std::env::var("LLM_API_KEY"),
std::env::var("LLM_MODEL"),
) {
let config = cortex_mem_core::llm::client::LLMConfig {
api_base_url: api_url,
api_key,
model_efficient: model,
temperature: 0.1,
max_tokens: 4096,
};
match cortex_mem_core::llm::LLMClientImpl::new(config) {
Ok(client) => {
tracing::info!("LLM client initialized from env");
Some(Arc::new(client) as Arc<dyn LLMClient>)
}
Err(e) => {
tracing::warn!("Failed to initialize LLM client: {}", e);
None
}
}
} else {
tracing::warn!("LLM client not configured");
None
};
let embedding_config = if let (Ok(api_url), Ok(api_key), Ok(model)) = (
std::env::var("EMBEDDING_API_BASE_URL"),
std::env::var("EMBEDDING_API_KEY"),
std::env::var("EMBEDDING_MODEL"),
) {
Some(EmbeddingConfig {
api_base_url: api_url,
api_key,
model_name: model,
batch_size: 10,
timeout_secs: 30,
})
} else {
tracing::warn!("Embedding not configured");
None
};
let qdrant_config = if let (Ok(url), Ok(collection)) = (
std::env::var("QDRANT_URL"),
std::env::var("QDRANT_COLLECTION"),
) {
Some(QdrantConfig {
url,
collection_name: collection,
embedding_dim: std::env::var("QDRANT_EMBEDDING_DIM")
.ok()
.and_then(|s| s.parse().ok()),
timeout_secs: 30,
api_key: std::env::var("QDRANT_API_KEY").ok(),
tenant_id: None, })
} else {
tracing::warn!("Qdrant not configured");
None
};
Ok((llm_client, embedding_config, qdrant_config))
}
}
/// Enumerate tenant directories under the known tenant roots.
///
/// Scans both `<data_dir>/tenants` and `<data_dir>/cortex/tenants`; every
/// subdirectory name is a tenant id. A tenant present under both roots is
/// reported once (the previous implementation returned it twice).
/// Unreadable or missing roots are skipped silently.
pub async fn list_tenants(&self) -> Vec<String> {
    let possible_paths = [
        self.data_dir.join("tenants"),
        self.data_dir.join("cortex").join("tenants"),
    ];
    let mut seen = std::collections::HashSet::new();
    let mut tenants = Vec::new();
    for tenants_path in possible_paths {
        // read_dir fails for nonexistent paths, so no separate exists() check
        // is needed.
        if let Ok(entries) = std::fs::read_dir(&tenants_path) {
            for entry in entries.flatten() {
                if entry.file_type().map(|ft| ft.is_dir()).unwrap_or(false) {
                    let name = entry.file_name().to_string_lossy().to_string();
                    // Deduplicate across roots, preserving first-seen order.
                    if seen.insert(name.clone()) {
                        tenants.push(name);
                    }
                }
            }
        }
    }
    tenants
}
/// Switch the active tenant, recording its root and id, and — when both an
/// embedding client and an LLM client exist — rebuilding the vector search
/// engine against the tenant-scoped Qdrant collection.
///
/// # Errors
/// Returns an error when no directory for `tenant_id` exists under either
/// tenant root, or when configuration reloading fails. A failed Qdrant
/// connection is logged and does not fail the switch (the previous engine
/// stays installed).
pub async fn switch_tenant(&self, tenant_id: &str) -> anyhow::Result<()> {
    let tenant_root = [
        self.data_dir.join("tenants").join(tenant_id),
        self.data_dir.join("cortex").join("tenants").join(tenant_id),
    ]
    .into_iter()
    .find(|path| path.exists())
    .ok_or_else(|| anyhow::anyhow!("Tenant {} not found", tenant_id))?;

    // Scope each write guard so no lock is held across the awaits below.
    {
        let mut current = self.current_tenant_root.write().await;
        *current = Some(tenant_root.clone());
    }
    {
        let mut current_id = self.current_tenant_id.write().await;
        *current_id = Some(tenant_id.to_string());
    }
    tracing::info!("Switched to tenant root: {:?}", tenant_root);

    if let (Some(ec), Some(llm)) = (&self.embedding_client, &self.llm_client) {
        // Re-read configuration: the raw QdrantConfig is not retained on self.
        let (_, _, qdrant_cfg_opt) = Self::load_configs()?;
        if let Some(mut qdrant_cfg) = qdrant_cfg_opt {
            qdrant_cfg.tenant_id = Some(tenant_id.to_string());
            match cortex_mem_core::QdrantVectorStore::new(&qdrant_cfg).await {
                Ok(qdrant_store) => {
                    let qdrant_arc = Arc::new(qdrant_store);
                    // Filesystem rooted at the tenant directory so the engine
                    // resolves results against tenant data.
                    let tenant_filesystem = Arc::new(CortexFilesystem::new(
                        tenant_root.to_string_lossy().as_ref(),
                    ));
                    let new_vector_engine = Arc::new(VectorSearchEngine::with_llm(
                        qdrant_arc,
                        ec.clone(),
                        tenant_filesystem,
                        llm.clone(),
                    ));
                    let mut engine = self.vector_engine.write().await;
                    *engine = Some(new_vector_engine);
                    tracing::info!(
                        "✅ VectorSearchEngine recreated for tenant: {} with collection: {}",
                        tenant_id,
                        qdrant_cfg.get_collection_name()
                    );
                }
                Err(e) => {
                    // Previously this failure was silently swallowed, leaving
                    // the old tenant's engine active with no trace in the logs.
                    tracing::warn!(
                        "Failed to connect to Qdrant for tenant {}: {}",
                        tenant_id,
                        e
                    );
                }
            }
        }
    }
    Ok(())
}
}