// dakera_client/memory.rs
1//! Memory-oriented client methods for Dakera AI Agent Memory Platform
2//!
3//! Provides high-level methods for storing, recalling, and managing
4//! agent memories and sessions through the Dakera API.
5
6use serde::{Deserialize, Serialize};
7
8use crate::error::Result;
9use crate::types::{
10    AgentFeedbackSummary, EdgeType, FeedbackHealthResponse, FeedbackHistoryResponse,
11    FeedbackResponse, FeedbackSignal, GraphExport, GraphLinkRequest, GraphLinkResponse,
12    GraphOptions, GraphPath, MemoryFeedbackBody, MemoryGraph, MemoryImportancePatch,
13};
14use crate::DakeraClient;
15
16// ============================================================================
17// Memory Types (client-side)
18// ============================================================================
19
20/// Memory type classification
21#[derive(Debug, Clone, Serialize, Deserialize, Default)]
22#[serde(rename_all = "lowercase")]
23pub enum MemoryType {
24    #[default]
25    Episodic,
26    Semantic,
27    Procedural,
28    Working,
29}
30
/// Store a memory request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoreMemoryRequest {
    /// Agent that owns the memory.
    pub agent_id: String,
    /// Memory text content.
    pub content: String,
    /// Memory classification; defaults to `MemoryType::Episodic` when omitted.
    #[serde(default)]
    pub memory_type: MemoryType,
    /// Importance score in [0.0, 1.0]; defaults to 0.5 when omitted
    /// (see `default_importance` and the clamp in `with_importance`).
    #[serde(default = "default_importance")]
    pub importance: f32,
    /// Free-form tags; defaults to empty.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Session to associate the memory with, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Arbitrary JSON metadata, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Optional TTL in seconds. The memory is hard-deleted after this many
    /// seconds from creation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ttl_seconds: Option<u64>,
    /// Optional explicit expiry as a Unix timestamp (seconds). Takes precedence
    /// over `ttl_seconds` when both are set. The memory is hard-deleted by the
    /// decay engine on expiry (DECAY-3).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expires_at: Option<u64>,
}
56
/// Serde default for [`StoreMemoryRequest::importance`].
fn default_importance() -> f32 {
    const DEFAULT_IMPORTANCE: f32 = 0.5;
    DEFAULT_IMPORTANCE
}
60
61impl StoreMemoryRequest {
62    /// Create a new store memory request
63    pub fn new(agent_id: impl Into<String>, content: impl Into<String>) -> Self {
64        Self {
65            agent_id: agent_id.into(),
66            content: content.into(),
67            memory_type: MemoryType::default(),
68            importance: 0.5,
69            tags: Vec::new(),
70            session_id: None,
71            metadata: None,
72            ttl_seconds: None,
73            expires_at: None,
74        }
75    }
76
77    /// Set memory type
78    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
79        self.memory_type = memory_type;
80        self
81    }
82
83    /// Set importance score
84    pub fn with_importance(mut self, importance: f32) -> Self {
85        self.importance = importance.clamp(0.0, 1.0);
86        self
87    }
88
89    /// Set tags
90    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
91        self.tags = tags;
92        self
93    }
94
95    /// Set session ID
96    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
97        self.session_id = Some(session_id.into());
98        self
99    }
100
101    /// Set metadata
102    pub fn with_metadata(mut self, metadata: serde_json::Value) -> Self {
103        self.metadata = Some(metadata);
104        self
105    }
106
107    /// Set TTL in seconds. The memory is hard-deleted after this many seconds
108    /// from creation.
109    pub fn with_ttl(mut self, ttl_seconds: u64) -> Self {
110        self.ttl_seconds = Some(ttl_seconds);
111        self
112    }
113
114    /// Set an explicit expiry Unix timestamp (seconds). Takes precedence over
115    /// `ttl_seconds` when both are set (DECAY-3).
116    pub fn with_expires_at(mut self, expires_at: u64) -> Self {
117        self.expires_at = Some(expires_at);
118        self
119    }
120}
121
/// Stored memory response from `POST /v1/memory/store`.
///
/// The server wraps the memory in a nested `memory` object:
/// `{"memory": {"id": "...", "agent_id": "...", ...}, "embedding_time_ms": N}`.
/// The `memory_id` and `agent_id` fields are convenience accessors mapped from
/// `memory.id` and `memory.agent_id` respectively.
///
/// `Deserialize` is implemented by hand to accept both the nested server
/// format and a legacy flat format.
#[derive(Debug, Clone, Serialize)]
pub struct StoreMemoryResponse {
    /// Memory ID (mapped from `memory.id`)
    pub memory_id: String,
    /// Agent ID (mapped from `memory.agent_id`)
    pub agent_id: String,
    /// Namespace (mapped from `memory.namespace`, defaults to `"default"`)
    pub namespace: String,
    /// Embedding latency in milliseconds
    pub embedding_time_ms: Option<u64>,
}
139
140impl<'de> serde::Deserialize<'de> for StoreMemoryResponse {
141    fn deserialize<D: serde::Deserializer<'de>>(
142        deserializer: D,
143    ) -> std::result::Result<Self, D::Error> {
144        use serde::de::Error;
145        let val = serde_json::Value::deserialize(deserializer)?;
146
147        // Server response: {"memory": {"id":"...","agent_id":"...",...}, "embedding_time_ms": N}
148        if let Some(memory) = val.get("memory") {
149            let memory_id = memory
150                .get("id")
151                .and_then(|v| v.as_str())
152                .ok_or_else(|| D::Error::missing_field("memory.id"))?
153                .to_string();
154            let agent_id = memory
155                .get("agent_id")
156                .and_then(|v| v.as_str())
157                .unwrap_or("")
158                .to_string();
159            let namespace = memory
160                .get("namespace")
161                .and_then(|v| v.as_str())
162                .unwrap_or("default")
163                .to_string();
164            let embedding_time_ms = val.get("embedding_time_ms").and_then(|v| v.as_u64());
165            return Ok(Self {
166                memory_id,
167                agent_id,
168                namespace,
169                embedding_time_ms,
170            });
171        }
172
173        // Legacy / mock format: {"memory_id":"...","agent_id":"...","namespace":"..."}
174        let memory_id = val
175            .get("memory_id")
176            .and_then(|v| v.as_str())
177            .ok_or_else(|| D::Error::missing_field("memory_id"))?
178            .to_string();
179        let agent_id = val
180            .get("agent_id")
181            .and_then(|v| v.as_str())
182            .unwrap_or("")
183            .to_string();
184        let namespace = val
185            .get("namespace")
186            .and_then(|v| v.as_str())
187            .unwrap_or("default")
188            .to_string();
189        Ok(Self {
190            memory_id,
191            agent_id,
192            namespace,
193            embedding_time_ms: None,
194        })
195    }
196}
197
/// Fusion strategy for hybrid recall (CE-14).
///
/// Controls how vector and BM25 scores are combined when `routing = Hybrid`.
/// `MinMax` is the server default since v0.11.2 (CEO architecture decision, DAK-1948).
/// `RecallRequest` sends `None` by default, so the server default applies automatically.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum FusionStrategy {
    /// Reciprocal Rank Fusion (Cormack et al., SIGIR 2009).
    /// Formula: score(d) = Σ 1 / (k + rank(d)), k = 60.
    /// This variant is the Rust `Default` for ergonomic use; pass `None` in
    /// `RecallRequest` to let the server apply its own default (MinMax since v0.11.2).
    #[default]
    Rrf,
    /// Weighted min-max normalization — server default since v0.11.2.
    /// Explicit rename: `snake_case` would otherwise emit `"min_max"`, but the
    /// wire format is `"minmax"`.
    #[serde(rename = "minmax")]
    MinMax,
}
216
217/// Retrieval routing mode for recall and search (CE-10).
218///
219/// Controls which retrieval index the server uses. `Auto` (default) lets the
220/// server pick the best strategy based on the query.
221#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
222#[serde(rename_all = "snake_case")]
223pub enum RoutingMode {
224    /// Server picks the best strategy (default).
225    Auto,
226    /// Force ANN vector search (HNSW).
227    Vector,
228    /// Force BM25 full-text search.
229    Bm25,
230    /// Fuse ANN and BM25 scores (RRF).
231    Hybrid,
232}
233
/// Recall memories request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallRequest {
    /// Agent whose memories to search.
    pub agent_id: String,
    /// Free-text query.
    pub query: String,
    /// Maximum number of results (default: 5).
    #[serde(default = "default_top_k")]
    pub top_k: usize,
    /// Restrict to one memory type, if set.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Minimum importance threshold (default: 0.0, i.e. no filtering).
    #[serde(default)]
    pub min_importance: f32,
    /// Restrict to one session, if set.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Tag filter; empty by default. NOTE(review): all-vs-any match semantics
    /// are server-defined — confirm against the API docs.
    #[serde(default)]
    pub tags: Vec<String>,
    /// COG-2: traverse KG depth-1 from recalled memories and include
    /// associatively linked memories in the response (default: false)
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub include_associated: bool,
    /// COG-2: max associated memories to return (default: 10, max: 10)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_cap: Option<u32>,
    /// KG-3: KG traversal depth 1–3 (default: 1); requires include_associated
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_depth: Option<u8>,
    /// KG-3: minimum edge weight for KG traversal (default: 0.0)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_min_weight: Option<f32>,
    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub since: Option<String>,
    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub until: Option<String>,
    /// CE-10: retrieval routing mode. `None` uses the server default (`auto`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub routing: Option<RoutingMode>,
    /// CE-13: cross-encoder reranking. `None` uses server default (`true` for recall,
    /// `false` for search). Set to `Some(false)` to disable on latency-sensitive paths.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rerank: Option<bool>,
    /// CE-14: fusion strategy when `routing = Hybrid`. `None` uses server default (`Rrf`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fusion: Option<FusionStrategy>,
    /// CE-17: explicit vector/BM25 weight for Hybrid routing (0.0–1.0).
    /// When set, overrides the adaptive heuristic from `QueryClassifier`.
    /// Omit for adaptive defaults (recommended for most callers).
    /// Only effective when `routing = RoutingMode::Hybrid`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vector_weight: Option<f32>,
    /// v0.11.0: fetch session-adjacent memories within ±5 min of each top result.
    /// `None` uses server default (`true`). Set to `Some(false)` to disable for
    /// latency-sensitive paths.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub neighborhood: Option<bool>,
}
290
/// Serde default for [`RecallRequest::top_k`].
fn default_top_k() -> usize {
    const DEFAULT_TOP_K: usize = 5;
    DEFAULT_TOP_K
}
294
295impl RecallRequest {
296    /// Create a new recall request
297    pub fn new(agent_id: impl Into<String>, query: impl Into<String>) -> Self {
298        Self {
299            agent_id: agent_id.into(),
300            query: query.into(),
301            top_k: 5,
302            memory_type: None,
303            min_importance: 0.0,
304            session_id: None,
305            tags: Vec::new(),
306            include_associated: false,
307            associated_memories_cap: None,
308            associated_memories_depth: None,
309            associated_memories_min_weight: None,
310            since: None,
311            until: None,
312            routing: None,
313            rerank: None,
314            fusion: None,
315            vector_weight: None,
316            neighborhood: None,
317        }
318    }
319
320    /// Set number of results
321    pub fn with_top_k(mut self, top_k: usize) -> Self {
322        self.top_k = top_k;
323        self
324    }
325
326    /// Filter by memory type
327    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
328        self.memory_type = Some(memory_type);
329        self
330    }
331
332    /// Set minimum importance threshold
333    pub fn with_min_importance(mut self, min: f32) -> Self {
334        self.min_importance = min;
335        self
336    }
337
338    /// Filter by session
339    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
340        self.session_id = Some(session_id.into());
341        self
342    }
343
344    /// Filter by tags
345    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
346        self.tags = tags;
347        self
348    }
349
350    /// COG-2: include KG depth-1 associated memories in the response
351    pub fn with_associated(mut self) -> Self {
352        self.include_associated = true;
353        self
354    }
355
356    /// COG-2: set max associated memories cap (default: 10, max: 10)
357    pub fn with_associated_cap(mut self, cap: u32) -> Self {
358        self.include_associated = true;
359        self.associated_memories_cap = Some(cap);
360        self
361    }
362
363    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
364    pub fn with_since(mut self, since: impl Into<String>) -> Self {
365        self.since = Some(since.into());
366        self
367    }
368
369    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
370    pub fn with_until(mut self, until: impl Into<String>) -> Self {
371        self.until = Some(until.into());
372        self
373    }
374
375    /// CE-10: set retrieval routing mode
376    pub fn with_routing(mut self, routing: RoutingMode) -> Self {
377        self.routing = Some(routing);
378        self
379    }
380
381    /// CE-13: enable or disable cross-encoder reranking (server default: true for recall)
382    pub fn with_rerank(mut self, rerank: bool) -> Self {
383        self.rerank = Some(rerank);
384        self
385    }
386
387    /// KG-3: set KG traversal depth (1–3, default: 1); implies include_associated
388    pub fn with_associated_depth(mut self, depth: u8) -> Self {
389        self.include_associated = true;
390        self.associated_memories_depth = Some(depth);
391        self
392    }
393
394    /// KG-3: set minimum edge weight for KG traversal (default: 0.0)
395    pub fn with_associated_min_weight(mut self, weight: f32) -> Self {
396        self.associated_memories_min_weight = Some(weight);
397        self
398    }
399
400    /// CE-14: set fusion strategy for hybrid recall (server default: `Rrf`)
401    pub fn with_fusion(mut self, fusion: FusionStrategy) -> Self {
402        self.fusion = Some(fusion);
403        self
404    }
405
406    /// CE-17: set explicit vector/BM25 weight for Hybrid routing (0.0–1.0).
407    /// Overrides the adaptive heuristic from `QueryClassifier`.
408    /// Omit for adaptive defaults (recommended for most callers).
409    pub fn with_vector_weight(mut self, weight: f32) -> Self {
410        self.vector_weight = Some(weight);
411        self
412    }
413
414    /// v0.11.0: enable or disable session-adjacent neighborhood enrichment
415    /// (server default: `true`). Set to `false` for latency-sensitive paths.
416    pub fn with_neighborhood(mut self, neighborhood: bool) -> Self {
417        self.neighborhood = Some(neighborhood);
418        self
419    }
420}
421
/// A recalled memory
///
/// `Deserialize` is implemented by hand; the `#[serde(...)]` attributes here
/// therefore only shape serialization output.
#[derive(Debug, Clone, Serialize)]
pub struct RecalledMemory {
    /// Memory ID.
    pub id: String,
    /// Memory text content.
    pub content: String,
    /// Memory classification.
    pub memory_type: MemoryType,
    /// Importance score.
    pub importance: f32,
    /// Retrieval score for this result (0.0 when the server omits it).
    pub score: f32,
    #[serde(default)]
    pub tags: Vec<String>,
    /// Session the memory belongs to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Arbitrary JSON metadata, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Creation time (Unix timestamp — presumably seconds; confirm with server).
    pub created_at: u64,
    /// Last access time (same unit as `created_at`).
    pub last_accessed_at: u64,
    /// Number of times the memory has been accessed.
    pub access_count: u32,
    /// KG-3: hop depth at which this memory was found (only set on associated memories)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub depth: Option<u8>,
}
443
/// Hand-written deserializer: accepts both the wrapped recall-result shape
/// and a flat memory object (e.g. from a direct memory-get).
impl<'de> serde::Deserialize<'de> for RecalledMemory {
    fn deserialize<D: serde::Deserializer<'de>>(
        deserializer: D,
    ) -> std::result::Result<Self, D::Error> {
        // Anonymous trait import: only needed so `D::Error::missing_field` resolves.
        use serde::de::Error as _;
        let val = serde_json::Value::deserialize(deserializer)?;

        // Server wraps recall results as {memory:{...}, score, weighted_score, smart_score}.
        // Fall back to flat format for direct memory-get responses.
        // NOTE(review): `smart_score` is never consulted here — confirm whether it
        // should take precedence over `score`/`weighted_score`.
        let score = val
            .get("score")
            .and_then(|v| v.as_f64())
            .or_else(|| val.get("weighted_score").and_then(|v| v.as_f64()))
            .unwrap_or(0.0) as f32;

        // Wrapped shape: memory fields live under "memory"; flat shape: on the root.
        let mem = val.get("memory").unwrap_or(&val);

        // `id` and `content` are mandatory; all remaining fields fall back to defaults.
        let id = mem
            .get("id")
            .and_then(|v| v.as_str())
            .ok_or_else(|| D::Error::missing_field("id"))?
            .to_string();
        let content = mem
            .get("content")
            .and_then(|v| v.as_str())
            .ok_or_else(|| D::Error::missing_field("content"))?
            .to_string();
        // Missing or unrecognized type strings silently fall back to Episodic.
        let memory_type: MemoryType = mem
            .get("memory_type")
            .and_then(|v| serde_json::from_value(v.clone()).ok())
            .unwrap_or(MemoryType::Episodic);
        let importance = mem
            .get("importance")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.5) as f32;
        let tags: Vec<String> = mem
            .get("tags")
            .and_then(|v| serde_json::from_value(v.clone()).ok())
            .unwrap_or_default();
        let session_id = mem
            .get("session_id")
            .and_then(|v| v.as_str())
            .map(String::from);
        // Explicit JSON null is treated the same as an absent metadata field.
        let metadata = mem.get("metadata").cloned().filter(|v| !v.is_null());
        let created_at = mem.get("created_at").and_then(|v| v.as_u64()).unwrap_or(0);
        let last_accessed_at = mem
            .get("last_accessed_at")
            .and_then(|v| v.as_u64())
            .unwrap_or(0);
        let access_count = mem
            .get("access_count")
            .and_then(|v| v.as_u64())
            .unwrap_or(0) as u32;
        // KG-3: hop depth, only present on associated memories.
        let depth = mem.get("depth").and_then(|v| v.as_u64()).map(|v| v as u8);

        Ok(Self {
            id,
            content,
            memory_type,
            importance,
            score,
            tags,
            session_id,
            metadata,
            created_at,
            last_accessed_at,
            access_count,
            depth,
        })
    }
}
515
/// Recall response
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallResponse {
    /// Ranked recall results.
    pub memories: Vec<RecalledMemory>,
    /// Total number of matches found (0 when the server omits the field).
    #[serde(default)]
    pub total_found: usize,
    /// COG-2 / KG-3: KG associated memories at configurable depth (only present when include_associated was true)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories: Option<Vec<RecalledMemory>>,
}
526
/// Forget (delete) memories request
///
/// See the constructors for the common single-filter forms. NOTE(review):
/// how multiple filters combine (AND vs OR) is server-defined — confirm
/// against the API docs before mixing them.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetRequest {
    /// Agent whose memories to delete.
    pub agent_id: String,
    /// Explicit memory IDs to delete (empty = no ID filter).
    #[serde(default)]
    pub memory_ids: Vec<String>,
    /// Tag filter for deletion (empty = no tag filter).
    #[serde(default)]
    pub tags: Vec<String>,
    /// Delete memories belonging to this session, if set.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Delete memories created before this Unix timestamp, if set.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub before_timestamp: Option<u64>,
}
540
541impl ForgetRequest {
542    /// Forget specific memories by ID
543    pub fn by_ids(agent_id: impl Into<String>, ids: Vec<String>) -> Self {
544        Self {
545            agent_id: agent_id.into(),
546            memory_ids: ids,
547            tags: Vec::new(),
548            session_id: None,
549            before_timestamp: None,
550        }
551    }
552
553    /// Forget memories with specific tags
554    pub fn by_tags(agent_id: impl Into<String>, tags: Vec<String>) -> Self {
555        Self {
556            agent_id: agent_id.into(),
557            memory_ids: Vec::new(),
558            tags,
559            session_id: None,
560            before_timestamp: None,
561        }
562    }
563
564    /// Forget all memories in a session
565    pub fn by_session(agent_id: impl Into<String>, session_id: impl Into<String>) -> Self {
566        Self {
567            agent_id: agent_id.into(),
568            memory_ids: Vec::new(),
569            tags: Vec::new(),
570            session_id: Some(session_id.into()),
571            before_timestamp: None,
572        }
573    }
574}
575
/// Forget response
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetResponse {
    /// Number of memories deleted by the forget operation.
    pub deleted_count: u64,
}

/// Session start request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartRequest {
    /// Agent that owns the session.
    pub agent_id: String,
    /// Arbitrary JSON metadata attached to the session, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}

/// Session information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    /// Session ID.
    pub id: String,
    /// Owning agent ID.
    pub agent_id: String,
    /// Start time (Unix timestamp — presumably seconds; confirm with server).
    pub started_at: u64,
    /// End time; absent while the session is still open.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ended_at: Option<u64>,
    /// Session summary, if one was stored (see `SessionEndRequest::summary`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
    /// Arbitrary JSON metadata, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Cached count of memories in this session
    #[serde(default)]
    pub memory_count: usize,
}

/// Session end request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndRequest {
    /// Summary to store on the session when ending it, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
}

/// Response from `POST /v1/sessions/start`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartResponse {
    /// The newly started session.
    pub session: Session,
}

/// Response from `POST /v1/sessions/{id}/end`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndResponse {
    /// The ended session.
    pub session: Session,
    /// Number of memories recorded in the session.
    pub memory_count: usize,
}

/// Request to update a memory
///
/// All fields are optional; unset fields are omitted from the JSON body.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateMemoryRequest {
    /// New content, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// New metadata, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// New memory type, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
}

/// Request to update memory importance
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateImportanceRequest {
    /// IDs of the memories to update.
    pub memory_ids: Vec<String>,
    /// Importance score to apply to all listed memories.
    pub importance: f32,
}
644
/// DBSCAN algorithm config for adaptive consolidation (CE-6).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidationConfig {
    /// Clustering algorithm: `"dbscan"` (default) or `"greedy"`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub algorithm: Option<String>,
    /// Minimum cluster samples for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_samples: Option<u32>,
    /// Epsilon distance parameter for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eps: Option<f32>,
}

/// One step in the consolidation execution log (CE-6).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsolidationLogEntry {
    /// Name of the consolidation step.
    pub step: String,
    /// Memory count before this step ran.
    pub memories_before: usize,
    /// Memory count after this step ran.
    pub memories_after: usize,
    /// Step duration in milliseconds.
    pub duration_ms: f64,
}

/// Request to consolidate memories
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidateRequest {
    /// Restrict consolidation to one memory type, if set.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<String>,
    /// Threshold parameter — presumably a similarity cutoff; confirm with
    /// the server API docs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub threshold: Option<f32>,
    /// Dry-run flag (report without applying — TODO confirm server semantics).
    #[serde(default)]
    pub dry_run: bool,
    /// Optional DBSCAN algorithm configuration (CE-6).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<ConsolidationConfig>,
}

/// Response from consolidation (`POST /v1/memory/consolidate`).
///
/// The server returns `{"memories_removed": N, "source_memory_ids": [...], "consolidated_memory": {...}}`.
/// `consolidated_count` is mapped from `memories_removed` for backward compat.
#[derive(Debug, Clone, Serialize)]
pub struct ConsolidateResponse {
    /// Number of source memories removed (= `memories_removed` from server)
    pub consolidated_count: usize,
    /// Alias for consolidated_count
    pub removed_count: usize,
    /// IDs of source memories that were removed
    ///
    /// NOTE(review): despite the name, this carries `source_memory_ids`
    /// (the removed sources), not newly created memories; the name is kept
    /// for backward compatibility.
    #[serde(default)]
    pub new_memories: Vec<String>,
    /// Step-by-step consolidation log (CE-6, optional).
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub log: Vec<ConsolidationLogEntry>,
}
699
700impl<'de> serde::Deserialize<'de> for ConsolidateResponse {
701    fn deserialize<D: serde::Deserializer<'de>>(
702        deserializer: D,
703    ) -> std::result::Result<Self, D::Error> {
704        let val = serde_json::Value::deserialize(deserializer)?;
705        // Server format: {"consolidated_memory":{...}, "source_memory_ids":[...], "memories_removed": N}
706        let removed = val
707            .get("memories_removed")
708            .and_then(|v| v.as_u64())
709            .or_else(|| val.get("removed_count").and_then(|v| v.as_u64()))
710            .or_else(|| val.get("consolidated_count").and_then(|v| v.as_u64()))
711            .unwrap_or(0) as usize;
712        let source_ids: Vec<String> = val
713            .get("source_memory_ids")
714            .and_then(|v| v.as_array())
715            .map(|arr| {
716                arr.iter()
717                    .filter_map(|v| v.as_str().map(String::from))
718                    .collect()
719            })
720            .unwrap_or_default();
721        Ok(Self {
722            consolidated_count: removed,
723            removed_count: removed,
724            new_memories: source_ids,
725            log: vec![],
726        })
727    }
728}
729
// ============================================================================
// DX-1: Memory Import / Export
// ============================================================================

/// Response from `POST /v1/import` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryImportResponse {
    /// Number of memories successfully imported.
    pub imported_count: usize,
    /// Number of input records skipped.
    pub skipped_count: usize,
    /// Error messages for records that failed, if any.
    #[serde(default)]
    pub errors: Vec<String>,
}

/// Response from `GET /v1/export` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryExportResponse {
    /// Exported memory records as raw JSON values.
    pub data: Vec<serde_json::Value>,
    /// Export format identifier.
    pub format: String,
    /// Number of exported records.
    pub count: usize,
}

// ============================================================================
// OBS-1: Business-Event Audit Log
// ============================================================================

/// A single business-event entry from the audit log (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditEvent {
    /// Event ID.
    pub id: String,
    /// Event type identifier.
    pub event_type: String,
    /// Agent the event relates to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    /// Namespace the event relates to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
    /// Event time (Unix timestamp — presumably seconds; confirm with server).
    pub timestamp: u64,
    /// Event-specific details; JSON `null` when absent.
    #[serde(default)]
    pub details: serde_json::Value,
}

/// Response from `GET /v1/audit` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditListResponse {
    /// Events on this page.
    pub events: Vec<AuditEvent>,
    /// Total number of matching events.
    pub total: usize,
    /// Opaque cursor for the next page, when more events remain.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}

/// Response from `POST /v1/audit/export` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditExportResponse {
    /// Exported events serialized into a single string.
    pub data: String,
    /// Export format identifier.
    pub format: String,
    /// Number of exported events.
    pub count: usize,
}

/// Query parameters for the audit log (OBS-1).
///
/// All fields are optional; unset fields are omitted from the query.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct AuditQuery {
    /// Filter by agent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    /// Filter by event type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub event_type: Option<String>,
    /// Lower time bound (Unix timestamp — confirm inclusivity/unit with server).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub from: Option<u64>,
    /// Upper time bound (Unix timestamp — confirm inclusivity/unit with server).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<u64>,
    /// Maximum number of events to return.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<u32>,
    /// Pagination cursor from a previous response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}
802
// ============================================================================
// EXT-1: External Extraction Providers
// ============================================================================

/// Result from `POST /v1/extract` (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionResult {
    /// Extracted entities (provider-specific JSON shape).
    pub entities: Vec<serde_json::Value>,
    /// Provider that performed the extraction.
    pub provider: String,
    /// Model used, when the provider reports one.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Extraction duration in milliseconds.
    pub duration_ms: f64,
}

/// Metadata for an available extraction provider (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionProviderInfo {
    /// Provider name.
    pub name: String,
    /// Whether the provider is currently usable.
    pub available: bool,
    /// Models the provider exposes, if reported.
    #[serde(default)]
    pub models: Vec<String>,
}

/// Response from `GET /v1/extract/providers` (EXT-1).
///
/// `untagged`: the endpoint may return either a bare JSON array or an object
/// wrapping the array under `providers`; both shapes deserialize here.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ExtractProvidersResponse {
    /// Bare array form: `[{...}, ...]`.
    List(Vec<ExtractionProviderInfo>),
    /// Object form: `{"providers": [{...}, ...]}`.
    Object {
        providers: Vec<ExtractionProviderInfo>,
    },
}
835
// ============================================================================
// SEC-3: AES-256-GCM Encryption Key Rotation
// ============================================================================

/// Request body for `POST /v1/admin/encryption/rotate-key` (SEC-3).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyRequest {
    /// New passphrase or 64-char hex key to rotate to.
    pub new_key: String,
    /// If set, rotate only memories in this namespace. Omit to rotate all.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
}

/// Response from `POST /v1/admin/encryption/rotate-key` (SEC-3).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyResponse {
    /// Number of memories re-encrypted with the new key.
    pub rotated: usize,
    /// Number of memories skipped.
    pub skipped: usize,
    /// Namespaces touched by the rotation.
    #[serde(default)]
    pub namespaces: Vec<String>,
}

/// Request for memory feedback
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeedbackRequest {
    /// Memory the feedback applies to.
    pub memory_id: String,
    /// Feedback value. NOTE(review): free-form string here; the accepted
    /// vocabulary is server-defined — confirm against the API docs.
    pub feedback: String,
    /// Optional relevance score.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub relevance_score: Option<f32>,
}

/// Response from legacy feedback endpoint (POST /v1/agents/:id/memories/feedback)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LegacyFeedbackResponse {
    /// Outcome status string.
    pub status: String,
    /// Importance after applying the feedback, when the server adjusted it.
    pub updated_importance: Option<f32>,
}
874
875// ============================================================================
876// CE-2: Batch Recall / Forget Types
877// ============================================================================
878
/// Filter predicates for batch memory operations (CE-2).
///
/// All fields are optional.  For [`BatchForgetRequest`] at least one must be
/// set (server-side safety guard).
///
/// NOTE(review): set predicates appear to combine conjunctively (AND) —
/// confirm against the server's batch handler before relying on it.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BatchMemoryFilter {
    /// Restrict to memories that carry **all** listed tags.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<String>>,
    /// Minimum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_importance: Option<f32>,
    /// Maximum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_importance: Option<f32>,
    /// Only memories created at or after this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_after: Option<u64>,
    /// Only memories created before or at this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_before: Option<u64>,
    /// Restrict to a specific memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Restrict to memories from a specific session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
}
907
908impl BatchMemoryFilter {
909    /// Convenience: filter by tags.
910    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
911        self.tags = Some(tags);
912        self
913    }
914
915    /// Convenience: filter by minimum importance.
916    pub fn with_min_importance(mut self, min: f32) -> Self {
917        self.min_importance = Some(min);
918        self
919    }
920
921    /// Convenience: filter by maximum importance.
922    pub fn with_max_importance(mut self, max: f32) -> Self {
923        self.max_importance = Some(max);
924        self
925    }
926
927    /// Convenience: filter by session.
928    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
929        self.session_id = Some(session_id.into());
930        self
931    }
932}
933
/// Request body for `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallRequest {
    /// Agent whose memory namespace to search.
    pub agent_id: String,
    /// Filter predicates to apply.
    #[serde(default)]
    pub filter: BatchMemoryFilter,
    /// Maximum number of results to return (default: 100, via
    /// `default_batch_limit`).
    #[serde(default = "default_batch_limit")]
    pub limit: usize,
}
946
/// Default `limit` for [`BatchRecallRequest`] when the field is omitted.
fn default_batch_limit() -> usize { 100 }
950
951impl BatchRecallRequest {
952    /// Create a new batch recall request for an agent.
953    pub fn new(agent_id: impl Into<String>) -> Self {
954        Self {
955            agent_id: agent_id.into(),
956            filter: BatchMemoryFilter::default(),
957            limit: 100,
958        }
959    }
960
961    /// Set filter predicates.
962    pub fn with_filter(mut self, filter: BatchMemoryFilter) -> Self {
963        self.filter = filter;
964        self
965    }
966
967    /// Set result limit.
968    pub fn with_limit(mut self, limit: usize) -> Self {
969        self.limit = limit;
970        self
971    }
972}
973
/// Response from `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallResponse {
    /// Memories that matched the filter, up to the requested `limit`.
    pub memories: Vec<RecalledMemory>,
    /// Total memories in the agent namespace.
    pub total: usize,
    /// Number of memories that passed the filter.
    pub filtered: usize,
}
983
/// Request body for `DELETE /v1/memories/forget/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetRequest {
    /// Agent whose memory namespace to purge from.
    pub agent_id: String,
    /// Filter predicates — **at least one must be set** (server safety guard).
    pub filter: BatchMemoryFilter,
}
992
993impl BatchForgetRequest {
994    /// Create a new batch forget request with the given filter.
995    pub fn new(agent_id: impl Into<String>, filter: BatchMemoryFilter) -> Self {
996        Self {
997            agent_id: agent_id.into(),
998            filter,
999        }
1000    }
1001}
1002
/// Response from `DELETE /v1/memories/forget/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetResponse {
    /// Number of memories the server hard-deleted.
    pub deleted_count: usize,
}
1008
1009// ============================================================================
1010// Memory Client Methods
1011// ============================================================================
1012
1013impl DakeraClient {
1014    // ========================================================================
1015    // Memory Operations
1016    // ========================================================================
1017
1018    /// Store a memory for an agent
1019    ///
1020    /// # Example
1021    ///
1022    /// ```rust,no_run
1023    /// use dakera_client::{DakeraClient, memory::StoreMemoryRequest};
1024    ///
1025    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1026    /// let client = DakeraClient::new("http://localhost:3000")?;
1027    ///
1028    /// let request = StoreMemoryRequest::new("agent-1", "The user prefers dark mode")
1029    ///     .with_importance(0.8)
1030    ///     .with_tags(vec!["preferences".to_string()]);
1031    ///
1032    /// let response = client.store_memory(request).await?;
1033    /// println!("Stored memory: {}", response.memory_id);
1034    /// # Ok(())
1035    /// # }
1036    /// ```
1037    pub async fn store_memory(&self, request: StoreMemoryRequest) -> Result<StoreMemoryResponse> {
1038        let url = format!("{}/v1/memory/store", self.base_url);
1039        let response = self.client.post(&url).json(&request).send().await?;
1040        self.handle_response(response).await
1041    }
1042
1043    /// Recall memories by semantic query
1044    ///
1045    /// # Example
1046    ///
1047    /// ```rust,no_run
1048    /// use dakera_client::{DakeraClient, memory::RecallRequest};
1049    ///
1050    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1051    /// let client = DakeraClient::new("http://localhost:3000")?;
1052    ///
1053    /// let request = RecallRequest::new("agent-1", "user preferences")
1054    ///     .with_top_k(10);
1055    ///
1056    /// let response = client.recall(request).await?;
1057    /// for memory in response.memories {
1058    ///     println!("{}: {} (score: {})", memory.id, memory.content, memory.score);
1059    /// }
1060    /// # Ok(())
1061    /// # }
1062    /// ```
1063    pub async fn recall(&self, request: RecallRequest) -> Result<RecallResponse> {
1064        let url = format!("{}/v1/memory/recall", self.base_url);
1065        let response = self.client.post(&url).json(&request).send().await?;
1066        self.handle_response(response).await
1067    }
1068
1069    /// Simple recall with just agent_id and query (convenience method)
1070    pub async fn recall_simple(
1071        &self,
1072        agent_id: &str,
1073        query: &str,
1074        top_k: usize,
1075    ) -> Result<RecallResponse> {
1076        self.recall(RecallRequest::new(agent_id, query).with_top_k(top_k))
1077            .await
1078    }
1079
1080    /// Get a specific memory by ID
1081    pub async fn get_memory(&self, memory_id: &str) -> Result<RecalledMemory> {
1082        let url = format!("{}/v1/memory/get/{}", self.base_url, memory_id);
1083        let response = self.client.get(&url).send().await?;
1084        self.handle_response(response).await
1085    }
1086
1087    /// Forget (delete) memories
1088    pub async fn forget(&self, request: ForgetRequest) -> Result<ForgetResponse> {
1089        let url = format!("{}/v1/memory/forget", self.base_url);
1090        let response = self.client.post(&url).json(&request).send().await?;
1091        self.handle_response(response).await
1092    }
1093
1094    /// Search memories with advanced filters
1095    pub async fn search_memories(&self, request: RecallRequest) -> Result<RecallResponse> {
1096        let url = format!("{}/v1/memory/search", self.base_url);
1097        let response = self.client.post(&url).json(&request).send().await?;
1098        self.handle_response(response).await
1099    }
1100
1101    /// Update an existing memory
1102    pub async fn update_memory(
1103        &self,
1104        agent_id: &str,
1105        memory_id: &str,
1106        request: UpdateMemoryRequest,
1107    ) -> Result<StoreMemoryResponse> {
1108        let url = format!(
1109            "{}/v1/agents/{}/memories/{}",
1110            self.base_url, agent_id, memory_id
1111        );
1112        let response = self.client.put(&url).json(&request).send().await?;
1113        self.handle_response(response).await
1114    }
1115
1116    /// Update importance of memories
1117    pub async fn update_importance(
1118        &self,
1119        agent_id: &str,
1120        request: UpdateImportanceRequest,
1121    ) -> Result<serde_json::Value> {
1122        let url = format!(
1123            "{}/v1/agents/{}/memories/importance",
1124            self.base_url, agent_id
1125        );
1126        let response = self.client.put(&url).json(&request).send().await?;
1127        self.handle_response(response).await
1128    }
1129
1130    /// Consolidate memories for an agent
1131    pub async fn consolidate(
1132        &self,
1133        agent_id: &str,
1134        request: ConsolidateRequest,
1135    ) -> Result<ConsolidateResponse> {
1136        // Server endpoint: POST /v1/memory/consolidate with agent_id in body
1137        let url = format!("{}/v1/memory/consolidate", self.base_url);
1138        let mut body = serde_json::to_value(&request)?;
1139        body["agent_id"] = serde_json::Value::String(agent_id.to_string());
1140        let response = self.client.post(&url).json(&body).send().await?;
1141        self.handle_response(response).await
1142    }
1143
1144    /// Submit feedback on a memory recall
1145    pub async fn memory_feedback(
1146        &self,
1147        agent_id: &str,
1148        request: FeedbackRequest,
1149    ) -> Result<LegacyFeedbackResponse> {
1150        let url = format!("{}/v1/agents/{}/memories/feedback", self.base_url, agent_id);
1151        let response = self.client.post(&url).json(&request).send().await?;
1152        self.handle_response(response).await
1153    }
1154
1155    // ========================================================================
1156    // Memory Feedback Loop — INT-1
1157    // ========================================================================
1158
1159    /// Submit upvote/downvote/flag feedback on a memory (INT-1).
1160    ///
1161    /// # Arguments
1162    /// * `memory_id` – The memory to give feedback on.
1163    /// * `agent_id` – The agent that owns the memory.
1164    /// * `signal` – [`FeedbackSignal`] value: `Upvote`, `Downvote`, or `Flag`.
1165    ///
1166    /// # Example
1167    /// ```no_run
1168    /// # use dakera_client::{DakeraClient, FeedbackSignal};
1169    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1170    /// let resp = client.feedback_memory("mem-abc", "agent-1", FeedbackSignal::Upvote).await?;
1171    /// println!("new importance: {}", resp.new_importance);
1172    /// # Ok(()) }
1173    /// ```
1174    pub async fn feedback_memory(
1175        &self,
1176        memory_id: &str,
1177        agent_id: &str,
1178        signal: FeedbackSignal,
1179    ) -> Result<FeedbackResponse> {
1180        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1181        let body = MemoryFeedbackBody {
1182            agent_id: agent_id.to_string(),
1183            signal,
1184        };
1185        let response = self.client.post(&url).json(&body).send().await?;
1186        self.handle_response(response).await
1187    }
1188
1189    /// Get the full feedback history for a memory (INT-1).
1190    pub async fn get_memory_feedback_history(
1191        &self,
1192        memory_id: &str,
1193    ) -> Result<FeedbackHistoryResponse> {
1194        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1195        let response = self.client.get(&url).send().await?;
1196        self.handle_response(response).await
1197    }
1198
1199    /// Get aggregate feedback counts and health score for an agent (INT-1).
1200    pub async fn get_agent_feedback_summary(&self, agent_id: &str) -> Result<AgentFeedbackSummary> {
1201        let url = format!("{}/v1/agents/{}/feedback/summary", self.base_url, agent_id);
1202        let response = self.client.get(&url).send().await?;
1203        self.handle_response(response).await
1204    }
1205
1206    /// Directly override a memory's importance score (INT-1).
1207    ///
1208    /// # Arguments
1209    /// * `memory_id` – The memory to update.
1210    /// * `agent_id` – The agent that owns the memory.
1211    /// * `importance` – New importance value (0.0–1.0).
1212    pub async fn patch_memory_importance(
1213        &self,
1214        memory_id: &str,
1215        agent_id: &str,
1216        importance: f32,
1217    ) -> Result<FeedbackResponse> {
1218        let url = format!("{}/v1/memories/{}/importance", self.base_url, memory_id);
1219        let body = MemoryImportancePatch {
1220            agent_id: agent_id.to_string(),
1221            importance,
1222        };
1223        let response = self.client.patch(&url).json(&body).send().await?;
1224        self.handle_response(response).await
1225    }
1226
1227    /// Get overall feedback health score for an agent (INT-1).
1228    ///
1229    /// The health score is the mean importance of all non-expired memories (0.0–1.0).
1230    /// A higher score indicates a healthier, more relevant memory store.
1231    pub async fn get_feedback_health(&self, agent_id: &str) -> Result<FeedbackHealthResponse> {
1232        let url = format!("{}/v1/feedback/health?agent_id={}", self.base_url, agent_id);
1233        let response = self.client.get(&url).send().await?;
1234        self.handle_response(response).await
1235    }
1236
1237    // ========================================================================
1238    // Memory Knowledge Graph Operations (CE-5 / SDK-9)
1239    // ========================================================================
1240
1241    /// Traverse the knowledge graph from a memory node.
1242    ///
1243    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1244    ///
1245    /// # Arguments
1246    /// * `memory_id` – Root memory ID to start traversal from.
1247    /// * `options` – Traversal options (depth, edge type filters).
1248    ///
1249    /// # Example
1250    /// ```no_run
1251    /// # use dakera_client::{DakeraClient, GraphOptions};
1252    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1253    /// let graph = client.memory_graph("mem-abc", GraphOptions::new().depth(2)).await?;
1254    /// println!("{} nodes, {} edges", graph.nodes.len(), graph.edges.len());
1255    /// # Ok(()) }
1256    /// ```
1257    pub async fn memory_graph(
1258        &self,
1259        memory_id: &str,
1260        options: GraphOptions,
1261    ) -> Result<MemoryGraph> {
1262        let mut url = format!("{}/v1/memories/{}/graph", self.base_url, memory_id);
1263        let depth = options.depth.unwrap_or(1);
1264        url.push_str(&format!("?depth={}", depth));
1265        if let Some(types) = &options.types {
1266            let type_strs: Vec<String> = types
1267                .iter()
1268                .map(|t| {
1269                    serde_json::to_value(t)
1270                        .unwrap()
1271                        .as_str()
1272                        .unwrap_or("")
1273                        .to_string()
1274                })
1275                .collect();
1276            if !type_strs.is_empty() {
1277                url.push_str(&format!("&types={}", type_strs.join(",")));
1278            }
1279        }
1280        let response = self.client.get(&url).send().await?;
1281        self.handle_response(response).await
1282    }
1283
1284    /// Find the shortest path between two memories in the knowledge graph.
1285    ///
1286    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1287    ///
1288    /// # Example
1289    /// ```no_run
1290    /// # use dakera_client::DakeraClient;
1291    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1292    /// let path = client.memory_path("mem-abc", "mem-xyz").await?;
1293    /// println!("{} hops: {:?}", path.hops, path.path);
1294    /// # Ok(()) }
1295    /// ```
1296    pub async fn memory_path(&self, source_id: &str, target_id: &str) -> Result<GraphPath> {
1297        let url = format!(
1298            "{}/v1/memories/{}/path?target={}",
1299            self.base_url,
1300            source_id,
1301            urlencoding::encode(target_id)
1302        );
1303        let response = self.client.get(&url).send().await?;
1304        self.handle_response(response).await
1305    }
1306
1307    /// Create an explicit edge between two memories.
1308    ///
1309    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1310    ///
1311    /// # Example
1312    /// ```no_run
1313    /// # use dakera_client::{DakeraClient, EdgeType};
1314    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1315    /// let resp = client.memory_link("mem-abc", "mem-xyz", EdgeType::LinkedBy).await?;
1316    /// println!("Created edge: {}", resp.edge.id);
1317    /// # Ok(()) }
1318    /// ```
1319    pub async fn memory_link(
1320        &self,
1321        source_id: &str,
1322        target_id: &str,
1323        edge_type: EdgeType,
1324    ) -> Result<GraphLinkResponse> {
1325        let url = format!("{}/v1/memories/{}/links", self.base_url, source_id);
1326        let request = GraphLinkRequest {
1327            target_id: target_id.to_string(),
1328            edge_type,
1329        };
1330        let response = self.client.post(&url).json(&request).send().await?;
1331        self.handle_response(response).await
1332    }
1333
1334    /// Export the full knowledge graph for an agent.
1335    ///
1336    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1337    ///
1338    /// # Arguments
1339    /// * `agent_id` – Agent whose graph to export.
1340    /// * `format` – Export format: `"json"` (default), `"graphml"`, or `"csv"`.
1341    pub async fn agent_graph_export(&self, agent_id: &str, format: &str) -> Result<GraphExport> {
1342        let url = format!(
1343            "{}/v1/agents/{}/graph/export?format={}",
1344            self.base_url, agent_id, format
1345        );
1346        let response = self.client.get(&url).send().await?;
1347        self.handle_response(response).await
1348    }
1349
1350    // ========================================================================
1351    // Session Operations
1352    // ========================================================================
1353
1354    /// Start a new session for an agent
1355    pub async fn start_session(&self, agent_id: &str) -> Result<Session> {
1356        let url = format!("{}/v1/sessions/start", self.base_url);
1357        let request = SessionStartRequest {
1358            agent_id: agent_id.to_string(),
1359            metadata: None,
1360        };
1361        let response = self.client.post(&url).json(&request).send().await?;
1362        let resp: SessionStartResponse = self.handle_response(response).await?;
1363        Ok(resp.session)
1364    }
1365
1366    /// Start a session with metadata
1367    pub async fn start_session_with_metadata(
1368        &self,
1369        agent_id: &str,
1370        metadata: serde_json::Value,
1371    ) -> Result<Session> {
1372        let url = format!("{}/v1/sessions/start", self.base_url);
1373        let request = SessionStartRequest {
1374            agent_id: agent_id.to_string(),
1375            metadata: Some(metadata),
1376        };
1377        let response = self.client.post(&url).json(&request).send().await?;
1378        let resp: SessionStartResponse = self.handle_response(response).await?;
1379        Ok(resp.session)
1380    }
1381
1382    /// End a session, optionally with a summary.
1383    /// Returns the session state and the total memory count at close.
1384    pub async fn end_session(
1385        &self,
1386        session_id: &str,
1387        summary: Option<String>,
1388    ) -> Result<SessionEndResponse> {
1389        let url = format!("{}/v1/sessions/{}/end", self.base_url, session_id);
1390        let request = SessionEndRequest { summary };
1391        let response = self.client.post(&url).json(&request).send().await?;
1392        self.handle_response(response).await
1393    }
1394
1395    /// Get a session by ID
1396    pub async fn get_session(&self, session_id: &str) -> Result<Session> {
1397        let url = format!("{}/v1/sessions/{}", self.base_url, session_id);
1398        let response = self.client.get(&url).send().await?;
1399        self.handle_response(response).await
1400    }
1401
1402    /// List sessions for an agent
1403    pub async fn list_sessions(&self, agent_id: &str) -> Result<Vec<Session>> {
1404        let url = format!("{}/v1/sessions?agent_id={}", self.base_url, agent_id);
1405        let response = self.client.get(&url).send().await?;
1406        self.handle_response(response).await
1407    }
1408
1409    /// Get memories in a session
1410    pub async fn session_memories(&self, session_id: &str) -> Result<RecallResponse> {
1411        let url = format!("{}/v1/sessions/{}/memories", self.base_url, session_id);
1412        let response = self.client.get(&url).send().await?;
1413        self.handle_response(response).await
1414    }
1415
1416    // ========================================================================
1417    // CE-2: Batch Recall / Forget
1418    // ========================================================================
1419
1420    /// Bulk-recall memories using filter predicates (CE-2).
1421    ///
1422    /// Uses `POST /v1/memories/recall/batch` — no embedding required.
1423    ///
1424    /// # Example
1425    ///
1426    /// ```rust,no_run
1427    /// use dakera_client::{DakeraClient, memory::{BatchRecallRequest, BatchMemoryFilter}};
1428    ///
1429    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1430    /// let client = DakeraClient::new("http://localhost:3000")?;
1431    ///
1432    /// let filter = BatchMemoryFilter::default().with_min_importance(0.7);
1433    /// let req = BatchRecallRequest::new("agent-1").with_filter(filter).with_limit(50);
1434    /// let resp = client.batch_recall(req).await?;
1435    /// println!("Found {} memories", resp.filtered);
1436    /// # Ok(())
1437    /// # }
1438    /// ```
1439    pub async fn batch_recall(&self, request: BatchRecallRequest) -> Result<BatchRecallResponse> {
1440        let url = format!("{}/v1/memories/recall/batch", self.base_url);
1441        let response = self.client.post(&url).json(&request).send().await?;
1442        self.handle_response(response).await
1443    }
1444
1445    /// Bulk-delete memories using filter predicates (CE-2).
1446    ///
1447    /// Uses `DELETE /v1/memories/forget/batch`.  The server requires at least
1448    /// one filter predicate to be set as a safety guard.
1449    ///
1450    /// # Example
1451    ///
1452    /// ```rust,no_run
1453    /// use dakera_client::{DakeraClient, memory::{BatchForgetRequest, BatchMemoryFilter}};
1454    ///
1455    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1456    /// let client = DakeraClient::new("http://localhost:3000")?;
1457    ///
1458    /// let filter = BatchMemoryFilter::default().with_min_importance(0.0).with_max_importance(0.2);
1459    /// let resp = client.batch_forget(BatchForgetRequest::new("agent-1", filter)).await?;
1460    /// println!("Deleted {} memories", resp.deleted_count);
1461    /// # Ok(())
1462    /// # }
1463    /// ```
1464    pub async fn batch_forget(&self, request: BatchForgetRequest) -> Result<BatchForgetResponse> {
1465        let url = format!("{}/v1/memories/forget/batch", self.base_url);
1466        let response = self.client.delete(&url).json(&request).send().await?;
1467        self.handle_response(response).await
1468    }
1469
1470    // ========================================================================
1471    // DX-1: Memory Import / Export
1472    // ========================================================================
1473
1474    /// Import memories from an external format (DX-1).
1475    ///
1476    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1477    ///
1478    /// ```no_run
1479    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1480    /// let client = dakera_client::DakeraClient::new("http://localhost:3000")?;
1481    /// let data = serde_json::json!([{"content": "hello", "agent_id": "agent-1"}]);
1482    /// let resp = client.import_memories(data, "jsonl", None, None).await?;
1483    /// println!("Imported {} memories", resp.imported_count);
1484    /// # Ok(())
1485    /// # }
1486    /// ```
1487    pub async fn import_memories(
1488        &self,
1489        data: serde_json::Value,
1490        format: &str,
1491        agent_id: Option<&str>,
1492        namespace: Option<&str>,
1493    ) -> Result<MemoryImportResponse> {
1494        let mut body = serde_json::json!({"data": data, "format": format});
1495        if let Some(aid) = agent_id {
1496            body["agent_id"] = serde_json::Value::String(aid.to_string());
1497        }
1498        if let Some(ns) = namespace {
1499            body["namespace"] = serde_json::Value::String(ns.to_string());
1500        }
1501        let url = format!("{}/v1/import", self.base_url);
1502        let response = self.client.post(&url).json(&body).send().await?;
1503        self.handle_response(response).await
1504    }
1505
1506    /// Export memories in a portable format (DX-1).
1507    ///
1508    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1509    pub async fn export_memories(
1510        &self,
1511        format: &str,
1512        agent_id: Option<&str>,
1513        namespace: Option<&str>,
1514        limit: Option<u32>,
1515    ) -> Result<MemoryExportResponse> {
1516        let mut params = vec![("format", format.to_string())];
1517        if let Some(aid) = agent_id {
1518            params.push(("agent_id", aid.to_string()));
1519        }
1520        if let Some(ns) = namespace {
1521            params.push(("namespace", ns.to_string()));
1522        }
1523        if let Some(l) = limit {
1524            params.push(("limit", l.to_string()));
1525        }
1526        let url = format!("{}/v1/export", self.base_url);
1527        let response = self.client.get(&url).query(&params).send().await?;
1528        self.handle_response(response).await
1529    }
1530
1531    // ========================================================================
1532    // OBS-1: Business-Event Audit Log
1533    // ========================================================================
1534
1535    /// List paginated audit log entries (OBS-1).
1536    pub async fn list_audit_events(&self, query: AuditQuery) -> Result<AuditListResponse> {
1537        let url = format!("{}/v1/audit", self.base_url);
1538        let response = self.client.get(&url).query(&query).send().await?;
1539        self.handle_response(response).await
1540    }
1541
1542    /// Stream live audit events via SSE (OBS-1).
1543    ///
1544    /// Returns a [`tokio::sync::mpsc::Receiver`] that yields [`DakeraEvent`] results.
1545    pub async fn stream_audit_events(
1546        &self,
1547        agent_id: Option<&str>,
1548        event_type: Option<&str>,
1549    ) -> Result<tokio::sync::mpsc::Receiver<Result<crate::events::DakeraEvent>>> {
1550        let mut params: Vec<(&str, String)> = Vec::new();
1551        if let Some(aid) = agent_id {
1552            params.push(("agent_id", aid.to_string()));
1553        }
1554        if let Some(et) = event_type {
1555            params.push(("event_type", et.to_string()));
1556        }
1557        let base = format!("{}/v1/audit/stream", self.base_url);
1558        let url = if params.is_empty() {
1559            base
1560        } else {
1561            let qs = params
1562                .iter()
1563                .map(|(k, v)| format!("{}={}", k, urlencoding::encode(v)))
1564                .collect::<Vec<_>>()
1565                .join("&");
1566            format!("{}?{}", base, qs)
1567        };
1568        self.stream_sse(url).await
1569    }
1570
1571    /// Bulk-export audit log entries (OBS-1).
1572    pub async fn export_audit(
1573        &self,
1574        format: &str,
1575        agent_id: Option<&str>,
1576        event_type: Option<&str>,
1577        from_ts: Option<u64>,
1578        to_ts: Option<u64>,
1579    ) -> Result<AuditExportResponse> {
1580        let mut body = serde_json::json!({"format": format});
1581        if let Some(aid) = agent_id {
1582            body["agent_id"] = serde_json::Value::String(aid.to_string());
1583        }
1584        if let Some(et) = event_type {
1585            body["event_type"] = serde_json::Value::String(et.to_string());
1586        }
1587        if let Some(f) = from_ts {
1588            body["from"] = serde_json::Value::Number(f.into());
1589        }
1590        if let Some(t) = to_ts {
1591            body["to"] = serde_json::Value::Number(t.into());
1592        }
1593        let url = format!("{}/v1/audit/export", self.base_url);
1594        let response = self.client.post(&url).json(&body).send().await?;
1595        self.handle_response(response).await
1596    }
1597
1598    // ========================================================================
1599    // EXT-1: External Extraction Providers
1600    // ========================================================================
1601
1602    /// Extract entities from text using a pluggable provider (EXT-1).
1603    ///
1604    /// Provider hierarchy: per-request > namespace default > GLiNER (bundled).
1605    /// Supported providers: `"gliner"`, `"openai"`, `"anthropic"`, `"openrouter"`, `"ollama"`.
1606    pub async fn extract_text(
1607        &self,
1608        text: &str,
1609        namespace: Option<&str>,
1610        provider: Option<&str>,
1611        model: Option<&str>,
1612    ) -> Result<ExtractionResult> {
1613        let mut body = serde_json::json!({"text": text});
1614        if let Some(ns) = namespace {
1615            body["namespace"] = serde_json::Value::String(ns.to_string());
1616        }
1617        if let Some(p) = provider {
1618            body["provider"] = serde_json::Value::String(p.to_string());
1619        }
1620        if let Some(m) = model {
1621            body["model"] = serde_json::Value::String(m.to_string());
1622        }
1623        let url = format!("{}/v1/extract", self.base_url);
1624        let response = self.client.post(&url).json(&body).send().await?;
1625        self.handle_response(response).await
1626    }
1627
1628    /// List available extraction providers and their models (EXT-1).
1629    pub async fn list_extract_providers(&self) -> Result<Vec<ExtractionProviderInfo>> {
1630        let url = format!("{}/v1/extract/providers", self.base_url);
1631        let response = self.client.get(&url).send().await?;
1632        let result: ExtractProvidersResponse = self.handle_response(response).await?;
1633        Ok(match result {
1634            ExtractProvidersResponse::List(v) => v,
1635            ExtractProvidersResponse::Object { providers } => providers,
1636        })
1637    }
1638
1639    /// Set the default extraction provider for a namespace (EXT-1).
1640    pub async fn configure_namespace_extractor(
1641        &self,
1642        namespace: &str,
1643        provider: &str,
1644        model: Option<&str>,
1645    ) -> Result<serde_json::Value> {
1646        let mut body = serde_json::json!({"provider": provider});
1647        if let Some(m) = model {
1648            body["model"] = serde_json::Value::String(m.to_string());
1649        }
1650        let url = format!(
1651            "{}/v1/namespaces/{}/extractor",
1652            self.base_url,
1653            urlencoding::encode(namespace)
1654        );
1655        let response = self.client.patch(&url).json(&body).send().await?;
1656        self.handle_response(response).await
1657    }
1658
1659    // =========================================================================
1660    // SEC-3: AES-256-GCM Encryption Key Rotation
1661    // =========================================================================
1662
1663    /// Re-encrypt all memory content blobs with a new AES-256-GCM key (SEC-3).
1664    ///
1665    /// After this call the new key is active in the running process.
1666    /// The operator must update `DAKERA_ENCRYPTION_KEY` and restart to make
1667    /// the rotation durable across restarts.
1668    ///
1669    /// Requires Admin scope.
1670    ///
1671    /// # Arguments
1672    /// * `new_key` - New passphrase or 64-char hex key.
1673    /// * `namespace` - If `Some`, rotate only this namespace. `None` rotates all.
1674    pub async fn rotate_encryption_key(
1675        &self,
1676        new_key: &str,
1677        namespace: Option<&str>,
1678    ) -> Result<RotateEncryptionKeyResponse> {
1679        let body = RotateEncryptionKeyRequest {
1680            new_key: new_key.to_string(),
1681            namespace: namespace.map(|s| s.to_string()),
1682        };
1683        let url = format!("{}/v1/admin/encryption/rotate-key", self.base_url);
1684        let response = self.client.post(&url).json(&body).send().await?;
1685        self.handle_response(response).await
1686    }
1687}