// dakera_client/memory.rs
//! Memory-oriented client methods for Dakera AI Agent Memory Platform
//!
//! Provides high-level methods for storing, recalling, and managing
//! agent memories and sessions through the Dakera API.

use serde::{Deserialize, Serialize};

use crate::error::Result;
use crate::types::{
    AgentFeedbackSummary, EdgeType, FeedbackHealthResponse, FeedbackHistoryResponse,
    FeedbackResponse, FeedbackSignal, GraphExport, GraphLinkRequest, GraphLinkResponse,
    GraphOptions, GraphPath, MemoryFeedbackBody, MemoryGraph, MemoryImportancePatch,
};
use crate::DakeraClient;

16// ============================================================================
17// Memory Types (client-side)
18// ============================================================================
19
20/// Memory type classification
21#[derive(Debug, Clone, Serialize, Deserialize, Default)]
22#[serde(rename_all = "lowercase")]
23pub enum MemoryType {
24    #[default]
25    Episodic,
26    Semantic,
27    Procedural,
28    Working,
29}
30
/// Store a memory request
///
/// Only `agent_id` and `content` are required; the remaining fields use
/// serde defaults or are omitted from the JSON body when `None`. Build via
/// [`StoreMemoryRequest::new`] and chain the `with_*` setters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoreMemoryRequest {
    /// Agent that owns the memory.
    pub agent_id: String,
    /// Memory content to store.
    pub content: String,
    /// Classification; defaults to `MemoryType::Episodic`.
    #[serde(default)]
    pub memory_type: MemoryType,
    /// Importance score; defaults to 0.5 (see `default_importance`).
    #[serde(default = "default_importance")]
    pub importance: f32,
    /// Tags attached to the memory; defaults to empty.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Session to associate the memory with, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Free-form metadata, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Optional TTL in seconds. The memory is hard-deleted after this many
    /// seconds from creation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ttl_seconds: Option<u64>,
    /// Optional explicit expiry as a Unix timestamp (seconds). Takes precedence
    /// over `ttl_seconds` when both are set. The memory is hard-deleted by the
    /// decay engine on expiry (DECAY-3).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expires_at: Option<u64>,
}
56
/// Serde default for `StoreMemoryRequest::importance`: mid-scale 0.5.
fn default_importance() -> f32 {
    const MID_SCALE: f32 = 0.5;
    MID_SCALE
}
60
61impl StoreMemoryRequest {
62    /// Create a new store memory request
63    pub fn new(agent_id: impl Into<String>, content: impl Into<String>) -> Self {
64        Self {
65            agent_id: agent_id.into(),
66            content: content.into(),
67            memory_type: MemoryType::default(),
68            importance: 0.5,
69            tags: Vec::new(),
70            session_id: None,
71            metadata: None,
72            ttl_seconds: None,
73            expires_at: None,
74        }
75    }
76
77    /// Set memory type
78    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
79        self.memory_type = memory_type;
80        self
81    }
82
83    /// Set importance score
84    pub fn with_importance(mut self, importance: f32) -> Self {
85        self.importance = importance.clamp(0.0, 1.0);
86        self
87    }
88
89    /// Set tags
90    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
91        self.tags = tags;
92        self
93    }
94
95    /// Set session ID
96    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
97        self.session_id = Some(session_id.into());
98        self
99    }
100
101    /// Set metadata
102    pub fn with_metadata(mut self, metadata: serde_json::Value) -> Self {
103        self.metadata = Some(metadata);
104        self
105    }
106
107    /// Set TTL in seconds. The memory is hard-deleted after this many seconds
108    /// from creation.
109    pub fn with_ttl(mut self, ttl_seconds: u64) -> Self {
110        self.ttl_seconds = Some(ttl_seconds);
111        self
112    }
113
114    /// Set an explicit expiry Unix timestamp (seconds). Takes precedence over
115    /// `ttl_seconds` when both are set (DECAY-3).
116    pub fn with_expires_at(mut self, expires_at: u64) -> Self {
117        self.expires_at = Some(expires_at);
118        self
119    }
120}
121
/// Stored memory response from `POST /v1/memory/store`.
///
/// The server wraps the memory in a nested `memory` object:
/// `{"memory": {"id": "...", "agent_id": "...", ...}, "embedding_time_ms": N}`.
/// The `memory_id` and `agent_id` fields are convenience accessors mapped from
/// `memory.id` and `memory.agent_id` respectively.
///
/// `Deserialize` is hand-written (see the impl below) so both the nested
/// server format and a legacy flat format are accepted; `Serialize` is
/// derived and emits the flat field names.
#[derive(Debug, Clone, Serialize)]
pub struct StoreMemoryResponse {
    /// Memory ID (mapped from `memory.id`)
    pub memory_id: String,
    /// Agent ID (mapped from `memory.agent_id`; empty string when absent)
    pub agent_id: String,
    /// Namespace (mapped from `memory.namespace`, defaults to `"default"`)
    pub namespace: String,
    /// Embedding latency in milliseconds
    pub embedding_time_ms: Option<u64>,
}
139
140impl<'de> serde::Deserialize<'de> for StoreMemoryResponse {
141    fn deserialize<D: serde::Deserializer<'de>>(
142        deserializer: D,
143    ) -> std::result::Result<Self, D::Error> {
144        use serde::de::Error;
145        let val = serde_json::Value::deserialize(deserializer)?;
146
147        // Server response: {"memory": {"id":"...","agent_id":"...",...}, "embedding_time_ms": N}
148        if let Some(memory) = val.get("memory") {
149            let memory_id = memory
150                .get("id")
151                .and_then(|v| v.as_str())
152                .ok_or_else(|| D::Error::missing_field("memory.id"))?
153                .to_string();
154            let agent_id = memory
155                .get("agent_id")
156                .and_then(|v| v.as_str())
157                .unwrap_or("")
158                .to_string();
159            let namespace = memory
160                .get("namespace")
161                .and_then(|v| v.as_str())
162                .unwrap_or("default")
163                .to_string();
164            let embedding_time_ms = val.get("embedding_time_ms").and_then(|v| v.as_u64());
165            return Ok(Self {
166                memory_id,
167                agent_id,
168                namespace,
169                embedding_time_ms,
170            });
171        }
172
173        // Legacy / mock format: {"memory_id":"...","agent_id":"...","namespace":"..."}
174        let memory_id = val
175            .get("memory_id")
176            .and_then(|v| v.as_str())
177            .ok_or_else(|| D::Error::missing_field("memory_id"))?
178            .to_string();
179        let agent_id = val
180            .get("agent_id")
181            .and_then(|v| v.as_str())
182            .unwrap_or("")
183            .to_string();
184        let namespace = val
185            .get("namespace")
186            .and_then(|v| v.as_str())
187            .unwrap_or("default")
188            .to_string();
189        Ok(Self {
190            memory_id,
191            agent_id,
192            namespace,
193            embedding_time_ms: None,
194        })
195    }
196}
197
198/// Fusion strategy for hybrid recall (CE-14).
199///
200/// Controls how vector and BM25 scores are combined when `routing = Hybrid`.
201/// `Rrf` (default) uses Reciprocal Rank Fusion (Cormack et al., SIGIR 2009).
202#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
203#[serde(rename_all = "snake_case")]
204pub enum FusionStrategy {
205    /// Reciprocal Rank Fusion — default, best for recall tasks.
206    /// Formula: score(d) = Σ 1 / (k + rank(d)), k = 60.
207    #[default]
208    Rrf,
209    /// Legacy weighted min-max normalization.
210    MinMax,
211}
212
213/// Retrieval routing mode for recall and search (CE-10).
214///
215/// Controls which retrieval index the server uses. `Auto` (default) lets the
216/// server pick the best strategy based on the query.
217#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
218#[serde(rename_all = "snake_case")]
219pub enum RoutingMode {
220    /// Server picks the best strategy (default).
221    Auto,
222    /// Force ANN vector search (HNSW).
223    Vector,
224    /// Force BM25 full-text search.
225    Bm25,
226    /// Fuse ANN and BM25 scores (RRF).
227    Hybrid,
228}
229
/// Recall memories request
///
/// Only `agent_id` and `query` are required; optional filters and retrieval
/// knobs are omitted from the JSON when unset so the server applies its own
/// defaults. Build via [`RecallRequest::new`] and chain the `with_*` setters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallRequest {
    /// Agent whose memories to search.
    pub agent_id: String,
    /// Free-text query.
    pub query: String,
    /// Maximum number of results (default: 5).
    #[serde(default = "default_top_k")]
    pub top_k: usize,
    /// Restrict results to one memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Minimum importance threshold (default: 0.0, i.e. no threshold).
    #[serde(default)]
    pub min_importance: f32,
    /// Restrict results to one session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Tag filter (empty by default).
    #[serde(default)]
    pub tags: Vec<String>,
    /// COG-2: traverse KG depth-1 from recalled memories and include
    /// associatively linked memories in the response (default: false)
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub include_associated: bool,
    /// COG-2: max associated memories to return (default: 10, max: 10)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_cap: Option<u32>,
    /// KG-3: KG traversal depth 1–3 (default: 1); requires include_associated
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_depth: Option<u8>,
    /// KG-3: minimum edge weight for KG traversal (default: 0.0)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_min_weight: Option<f32>,
    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub since: Option<String>,
    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub until: Option<String>,
    /// CE-10: retrieval routing mode. `None` uses the server default (`auto`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub routing: Option<RoutingMode>,
    /// CE-13: cross-encoder reranking. `None` uses server default (`true` for recall,
    /// `false` for search). Set to `Some(false)` to disable on latency-sensitive paths.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rerank: Option<bool>,
    /// CE-14: fusion strategy when `routing = Hybrid`. `None` uses server default (`Rrf`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fusion: Option<FusionStrategy>,
    /// v0.11.0: fetch session-adjacent memories within ±5 min of each top result.
    /// `None` uses server default (`true`). Set to `Some(false)` to disable for
    /// latency-sensitive paths.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub neighborhood: Option<bool>,
}
280
/// Serde default for `RecallRequest::top_k`: five results.
fn default_top_k() -> usize {
    const DEFAULT_RESULTS: usize = 5;
    DEFAULT_RESULTS
}
284
285impl RecallRequest {
286    /// Create a new recall request
287    pub fn new(agent_id: impl Into<String>, query: impl Into<String>) -> Self {
288        Self {
289            agent_id: agent_id.into(),
290            query: query.into(),
291            top_k: 5,
292            memory_type: None,
293            min_importance: 0.0,
294            session_id: None,
295            tags: Vec::new(),
296            include_associated: false,
297            associated_memories_cap: None,
298            associated_memories_depth: None,
299            associated_memories_min_weight: None,
300            since: None,
301            until: None,
302            routing: None,
303            rerank: None,
304            fusion: None,
305            neighborhood: None,
306        }
307    }
308
309    /// Set number of results
310    pub fn with_top_k(mut self, top_k: usize) -> Self {
311        self.top_k = top_k;
312        self
313    }
314
315    /// Filter by memory type
316    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
317        self.memory_type = Some(memory_type);
318        self
319    }
320
321    /// Set minimum importance threshold
322    pub fn with_min_importance(mut self, min: f32) -> Self {
323        self.min_importance = min;
324        self
325    }
326
327    /// Filter by session
328    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
329        self.session_id = Some(session_id.into());
330        self
331    }
332
333    /// Filter by tags
334    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
335        self.tags = tags;
336        self
337    }
338
339    /// COG-2: include KG depth-1 associated memories in the response
340    pub fn with_associated(mut self) -> Self {
341        self.include_associated = true;
342        self
343    }
344
345    /// COG-2: set max associated memories cap (default: 10, max: 10)
346    pub fn with_associated_cap(mut self, cap: u32) -> Self {
347        self.include_associated = true;
348        self.associated_memories_cap = Some(cap);
349        self
350    }
351
352    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
353    pub fn with_since(mut self, since: impl Into<String>) -> Self {
354        self.since = Some(since.into());
355        self
356    }
357
358    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
359    pub fn with_until(mut self, until: impl Into<String>) -> Self {
360        self.until = Some(until.into());
361        self
362    }
363
364    /// CE-10: set retrieval routing mode
365    pub fn with_routing(mut self, routing: RoutingMode) -> Self {
366        self.routing = Some(routing);
367        self
368    }
369
370    /// CE-13: enable or disable cross-encoder reranking (server default: true for recall)
371    pub fn with_rerank(mut self, rerank: bool) -> Self {
372        self.rerank = Some(rerank);
373        self
374    }
375
376    /// KG-3: set KG traversal depth (1–3, default: 1); implies include_associated
377    pub fn with_associated_depth(mut self, depth: u8) -> Self {
378        self.include_associated = true;
379        self.associated_memories_depth = Some(depth);
380        self
381    }
382
383    /// KG-3: set minimum edge weight for KG traversal (default: 0.0)
384    pub fn with_associated_min_weight(mut self, weight: f32) -> Self {
385        self.associated_memories_min_weight = Some(weight);
386        self
387    }
388
389    /// CE-14: set fusion strategy for hybrid recall (server default: `Rrf`)
390    pub fn with_fusion(mut self, fusion: FusionStrategy) -> Self {
391        self.fusion = Some(fusion);
392        self
393    }
394
395    /// v0.11.0: enable or disable session-adjacent neighborhood enrichment
396    /// (server default: `true`). Set to `false` for latency-sensitive paths.
397    pub fn with_neighborhood(mut self, neighborhood: bool) -> Self {
398        self.neighborhood = Some(neighborhood);
399        self
400    }
401}
402
/// A recalled memory
///
/// `Serialize` is derived; `Deserialize` is hand-written (see the impl
/// below) so both the wrapped recall format (`{"memory": {...}, "score": N}`)
/// and the flat memory object are accepted.
#[derive(Debug, Clone, Serialize)]
pub struct RecalledMemory {
    pub id: String,
    pub content: String,
    /// Classification; falls back to `Episodic` when missing or unparseable.
    pub memory_type: MemoryType,
    /// Importance score; falls back to 0.5 when missing.
    pub importance: f32,
    /// Ranking score: `score`, else `weighted_score`, else 0.0.
    pub score: f32,
    // NOTE(review): the serde field attributes below only affect the derived
    // Serialize impl; `#[serde(default)]` is inert here because Deserialize
    // is fully manual.
    #[serde(default)]
    pub tags: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Creation timestamp; 0 when missing from the payload.
    pub created_at: u64,
    /// Last access timestamp; 0 when missing from the payload.
    pub last_accessed_at: u64,
    /// Access counter; 0 when missing from the payload.
    pub access_count: u32,
    /// KG-3: hop depth at which this memory was found (only set on associated memories)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub depth: Option<u8>,
}
424
425impl<'de> serde::Deserialize<'de> for RecalledMemory {
426    fn deserialize<D: serde::Deserializer<'de>>(
427        deserializer: D,
428    ) -> std::result::Result<Self, D::Error> {
429        use serde::de::Error as _;
430        let val = serde_json::Value::deserialize(deserializer)?;
431
432        // Server wraps recall results as {memory:{...}, score, weighted_score, smart_score}.
433        // Fall back to flat format for direct memory-get responses.
434        let score = val
435            .get("score")
436            .and_then(|v| v.as_f64())
437            .or_else(|| val.get("weighted_score").and_then(|v| v.as_f64()))
438            .unwrap_or(0.0) as f32;
439
440        let mem = val.get("memory").unwrap_or(&val);
441
442        let id = mem
443            .get("id")
444            .and_then(|v| v.as_str())
445            .ok_or_else(|| D::Error::missing_field("id"))?
446            .to_string();
447        let content = mem
448            .get("content")
449            .and_then(|v| v.as_str())
450            .ok_or_else(|| D::Error::missing_field("content"))?
451            .to_string();
452        let memory_type: MemoryType = mem
453            .get("memory_type")
454            .and_then(|v| serde_json::from_value(v.clone()).ok())
455            .unwrap_or(MemoryType::Episodic);
456        let importance = mem
457            .get("importance")
458            .and_then(|v| v.as_f64())
459            .unwrap_or(0.5) as f32;
460        let tags: Vec<String> = mem
461            .get("tags")
462            .and_then(|v| serde_json::from_value(v.clone()).ok())
463            .unwrap_or_default();
464        let session_id = mem
465            .get("session_id")
466            .and_then(|v| v.as_str())
467            .map(String::from);
468        let metadata = mem.get("metadata").cloned().filter(|v| !v.is_null());
469        let created_at = mem.get("created_at").and_then(|v| v.as_u64()).unwrap_or(0);
470        let last_accessed_at = mem
471            .get("last_accessed_at")
472            .and_then(|v| v.as_u64())
473            .unwrap_or(0);
474        let access_count = mem
475            .get("access_count")
476            .and_then(|v| v.as_u64())
477            .unwrap_or(0) as u32;
478        let depth = mem.get("depth").and_then(|v| v.as_u64()).map(|v| v as u8);
479
480        Ok(Self {
481            id,
482            content,
483            memory_type,
484            importance,
485            score,
486            tags,
487            session_id,
488            metadata,
489            created_at,
490            last_accessed_at,
491            access_count,
492            depth,
493        })
494    }
495}
496
/// Recall response
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallResponse {
    /// Ranked primary results.
    pub memories: Vec<RecalledMemory>,
    /// Total match count reported by the server; 0 when absent.
    #[serde(default)]
    pub total_found: usize,
    /// COG-2 / KG-3: KG associated memories at configurable depth (only present when include_associated was true)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories: Option<Vec<RecalledMemory>>,
}
507
/// Forget (delete) memories request
///
/// Selectors: explicit IDs, tags, session, or an age cutoff. How multiple
/// selectors combine is decided server-side. Use the `by_*` constructors
/// for the common single-selector cases.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetRequest {
    /// Agent whose memories to delete.
    pub agent_id: String,
    /// Explicit memory IDs to delete (empty = not selecting by ID).
    #[serde(default)]
    pub memory_ids: Vec<String>,
    /// Tag selector (empty = not selecting by tag).
    #[serde(default)]
    pub tags: Vec<String>,
    /// Delete memories belonging to this session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Delete memories created before this Unix timestamp.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub before_timestamp: Option<u64>,
}
521
522impl ForgetRequest {
523    /// Forget specific memories by ID
524    pub fn by_ids(agent_id: impl Into<String>, ids: Vec<String>) -> Self {
525        Self {
526            agent_id: agent_id.into(),
527            memory_ids: ids,
528            tags: Vec::new(),
529            session_id: None,
530            before_timestamp: None,
531        }
532    }
533
534    /// Forget memories with specific tags
535    pub fn by_tags(agent_id: impl Into<String>, tags: Vec<String>) -> Self {
536        Self {
537            agent_id: agent_id.into(),
538            memory_ids: Vec::new(),
539            tags,
540            session_id: None,
541            before_timestamp: None,
542        }
543    }
544
545    /// Forget all memories in a session
546    pub fn by_session(agent_id: impl Into<String>, session_id: impl Into<String>) -> Self {
547        Self {
548            agent_id: agent_id.into(),
549            memory_ids: Vec::new(),
550            tags: Vec::new(),
551            session_id: Some(session_id.into()),
552            before_timestamp: None,
553        }
554    }
555}
556
/// Forget response
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetResponse {
    /// Number of memories deleted by the request.
    pub deleted_count: u64,
}

/// Session start request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartRequest {
    /// Agent the new session belongs to.
    pub agent_id: String,
    /// Optional free-form metadata attached to the session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}

/// Session information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    pub id: String,
    pub agent_id: String,
    /// When the session started.
    pub started_at: u64,
    /// When the session ended; `None` while still active.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ended_at: Option<u64>,
    /// Summary recorded when the session was ended, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Cached count of memories in this session
    #[serde(default)]
    pub memory_count: usize,
}

/// Session end request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndRequest {
    /// Optional summary to store on the ended session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
}

/// Response from `POST /v1/sessions/start`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartResponse {
    /// The newly created session.
    pub session: Session,
}

/// Response from `POST /v1/sessions/{id}/end`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndResponse {
    /// The session as stored after ending.
    pub session: Session,
    /// Number of memories in the session.
    pub memory_count: usize,
}
607
/// Request to update a memory
///
/// All fields optional; `None` fields are omitted from the JSON body.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateMemoryRequest {
    /// New content, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// New metadata, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// New memory type, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
}

/// Request to update memory importance
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateImportanceRequest {
    /// Memories whose importance to overwrite.
    pub memory_ids: Vec<String>,
    /// New importance value applied to every listed memory.
    pub importance: f32,
}
625
/// DBSCAN algorithm config for adaptive consolidation (CE-6).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidationConfig {
    /// Clustering algorithm: `"dbscan"` (default) or `"greedy"`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub algorithm: Option<String>,
    /// Minimum cluster samples for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_samples: Option<u32>,
    /// Epsilon distance parameter for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eps: Option<f32>,
}

/// One step in the consolidation execution log (CE-6).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsolidationLogEntry {
    /// Name of the consolidation step.
    pub step: String,
    /// Memory count entering this step.
    pub memories_before: usize,
    /// Memory count after this step ran.
    pub memories_after: usize,
    /// Duration of the step in milliseconds.
    pub duration_ms: f64,
}

/// Request to consolidate memories
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidateRequest {
    /// Restrict consolidation to one memory type (string form).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<String>,
    /// Threshold passed through to the server; `None` uses the server default.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub threshold: Option<f32>,
    /// Dry-run flag; semantics enforced server-side.
    #[serde(default)]
    pub dry_run: bool,
    /// Optional DBSCAN algorithm configuration (CE-6).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<ConsolidationConfig>,
}
662
/// Response from consolidation (`POST /v1/memory/consolidate`).
///
/// The server returns `{"memories_removed": N, "source_memory_ids": [...], "consolidated_memory": {...}}`.
/// `consolidated_count` is mapped from `memories_removed` for backward compat.
#[derive(Debug, Clone, Serialize)]
pub struct ConsolidateResponse {
    /// Number of source memories removed (= `memories_removed` from server)
    pub consolidated_count: usize,
    /// Alias for consolidated_count
    pub removed_count: usize,
    /// IDs of source memories that were removed
    ///
    /// NOTE(review): despite the name, this carries `source_memory_ids`
    /// (the removed inputs), not newly created memories — see the
    /// hand-written `Deserialize` impl.
    #[serde(default)]
    pub new_memories: Vec<String>,
    /// Step-by-step consolidation log (CE-6, optional).
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub log: Vec<ConsolidationLogEntry>,
}
680
681impl<'de> serde::Deserialize<'de> for ConsolidateResponse {
682    fn deserialize<D: serde::Deserializer<'de>>(
683        deserializer: D,
684    ) -> std::result::Result<Self, D::Error> {
685        let val = serde_json::Value::deserialize(deserializer)?;
686        // Server format: {"consolidated_memory":{...}, "source_memory_ids":[...], "memories_removed": N}
687        let removed = val
688            .get("memories_removed")
689            .and_then(|v| v.as_u64())
690            .or_else(|| val.get("removed_count").and_then(|v| v.as_u64()))
691            .or_else(|| val.get("consolidated_count").and_then(|v| v.as_u64()))
692            .unwrap_or(0) as usize;
693        let source_ids: Vec<String> = val
694            .get("source_memory_ids")
695            .and_then(|v| v.as_array())
696            .map(|arr| {
697                arr.iter()
698                    .filter_map(|v| v.as_str().map(String::from))
699                    .collect()
700            })
701            .unwrap_or_default();
702        Ok(Self {
703            consolidated_count: removed,
704            removed_count: removed,
705            new_memories: source_ids,
706            log: vec![],
707        })
708    }
709}
710
711// ============================================================================
712// DX-1: Memory Import / Export
713// ============================================================================
714
/// Response from `POST /v1/import` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryImportResponse {
    /// Memories successfully imported.
    pub imported_count: usize,
    /// Memories skipped (reason decided server-side).
    pub skipped_count: usize,
    /// Per-item error messages; empty when the import was clean.
    #[serde(default)]
    pub errors: Vec<String>,
}

/// Response from `GET /v1/export` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryExportResponse {
    /// Exported memories as raw JSON objects.
    pub data: Vec<serde_json::Value>,
    /// Export format identifier.
    pub format: String,
    /// Number of exported records.
    pub count: usize,
}
731
732// ============================================================================
733// OBS-1: Business-Event Audit Log
734// ============================================================================
735
/// A single business-event entry from the audit log (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditEvent {
    pub id: String,
    /// Event kind discriminator (string form).
    pub event_type: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
    pub timestamp: u64,
    /// Free-form event payload; defaults to JSON `null` when absent.
    #[serde(default)]
    pub details: serde_json::Value,
}

/// Response from `GET /v1/audit` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditListResponse {
    /// Events in this page.
    pub events: Vec<AuditEvent>,
    /// Total event count reported by the server.
    pub total: usize,
    /// Opaque pagination cursor; `None` when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}

/// Response from `POST /v1/audit/export` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditExportResponse {
    /// Exported payload, already encoded in `format`.
    pub data: String,
    /// Export format identifier.
    pub format: String,
    /// Number of exported events.
    pub count: usize,
}

/// Query parameters for the audit log (OBS-1).
///
/// All fields optional; unset fields are omitted from the query.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct AuditQuery {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub event_type: Option<String>,
    /// Lower time bound (unit not shown here — presumably Unix seconds; confirm).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub from: Option<u64>,
    /// Upper time bound.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<u64>,
    /// Maximum number of events to return.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<u32>,
    /// Pagination cursor from a previous `AuditListResponse`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}
783
784// ============================================================================
785// EXT-1: External Extraction Providers
786// ============================================================================
787
/// Result from `POST /v1/extract` (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionResult {
    /// Extracted entities as raw JSON objects.
    pub entities: Vec<serde_json::Value>,
    /// Provider that performed the extraction.
    pub provider: String,
    /// Model used, when the provider reports one.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Extraction duration in milliseconds.
    pub duration_ms: f64,
}

/// Metadata for an available extraction provider (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionProviderInfo {
    pub name: String,
    /// Whether the provider is currently usable.
    pub available: bool,
    /// Models the provider exposes; empty when unreported.
    #[serde(default)]
    pub models: Vec<String>,
}

/// Response from `GET /v1/extract/providers` (EXT-1).
///
/// `untagged` lets serde accept either a bare JSON array of providers or an
/// object wrapping them under a `providers` key.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ExtractProvidersResponse {
    /// Bare array form: `[{...}, ...]`.
    List(Vec<ExtractionProviderInfo>),
    /// Wrapped form: `{"providers": [...]}`.
    Object {
        providers: Vec<ExtractionProviderInfo>,
    },
}
816
817// ============================================================================
818// SEC-3: AES-256-GCM Encryption Key Rotation
819// ============================================================================
820
/// Request body for `POST /v1/admin/encryption/rotate-key` (SEC-3).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyRequest {
    /// New passphrase or 64-char hex key to rotate to.
    pub new_key: String,
    /// If set, rotate only memories in this namespace. Omit to rotate all.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
}

/// Response from `POST /v1/admin/encryption/rotate-key` (SEC-3).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyResponse {
    /// Memories re-encrypted under the new key.
    pub rotated: usize,
    /// Memories left untouched.
    pub skipped: usize,
    /// Namespaces the rotation covered; empty when unreported.
    #[serde(default)]
    pub namespaces: Vec<String>,
}
839
/// Request for memory feedback
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeedbackRequest {
    /// Memory the feedback applies to.
    pub memory_id: String,
    /// Feedback signal as a string (categories are server-defined).
    pub feedback: String,
    /// Optional relevance score attached to the feedback.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub relevance_score: Option<f32>,
}

/// Response from legacy feedback endpoint (POST /v1/agents/:id/memories/feedback)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LegacyFeedbackResponse {
    /// Outcome status string.
    pub status: String,
    /// New importance after the feedback was applied, when reported.
    pub updated_importance: Option<f32>,
}
855
856// ============================================================================
857// CE-2: Batch Recall / Forget Types
858// ============================================================================
859
/// Filter predicates for batch memory operations (CE-2).
///
/// All fields are optional.  For [`BatchForgetRequest`] at least one must be
/// set (server-side safety guard). Construct with `Default::default()` and
/// chain the `with_*` helpers.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BatchMemoryFilter {
    /// Restrict to memories that carry **all** listed tags.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<String>>,
    /// Minimum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_importance: Option<f32>,
    /// Maximum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_importance: Option<f32>,
    /// Only memories created at or after this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_after: Option<u64>,
    /// Only memories created before or at this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_before: Option<u64>,
    /// Restrict to a specific memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Restrict to memories from a specific session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
}
888
889impl BatchMemoryFilter {
890    /// Convenience: filter by tags.
891    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
892        self.tags = Some(tags);
893        self
894    }
895
896    /// Convenience: filter by minimum importance.
897    pub fn with_min_importance(mut self, min: f32) -> Self {
898        self.min_importance = Some(min);
899        self
900    }
901
902    /// Convenience: filter by maximum importance.
903    pub fn with_max_importance(mut self, max: f32) -> Self {
904        self.max_importance = Some(max);
905        self
906    }
907
908    /// Convenience: filter by session.
909    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
910        self.session_id = Some(session_id.into());
911        self
912    }
913}
914
/// Request body for `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallRequest {
    /// Agent whose memory namespace to search.
    pub agent_id: String,
    /// Filter predicates to apply.
    // Deserializes to `BatchMemoryFilter::default()` (match everything) when absent.
    #[serde(default)]
    pub filter: BatchMemoryFilter,
    /// Maximum number of results to return (default: 100).
    #[serde(default = "default_batch_limit")]
    pub limit: usize,
}
927
/// Serde default for [`BatchRecallRequest::limit`]: 100 results.
fn default_batch_limit() -> usize {
    100
}
931
932impl BatchRecallRequest {
933    /// Create a new batch recall request for an agent.
934    pub fn new(agent_id: impl Into<String>) -> Self {
935        Self {
936            agent_id: agent_id.into(),
937            filter: BatchMemoryFilter::default(),
938            limit: 100,
939        }
940    }
941
942    /// Set filter predicates.
943    pub fn with_filter(mut self, filter: BatchMemoryFilter) -> Self {
944        self.filter = filter;
945        self
946    }
947
948    /// Set result limit.
949    pub fn with_limit(mut self, limit: usize) -> Self {
950        self.limit = limit;
951        self
952    }
953}
954
/// Response from `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallResponse {
    /// The recalled memories.
    pub memories: Vec<RecalledMemory>,
    /// Total memories in the agent namespace.
    pub total: usize,
    /// Number of memories that passed the filter.
    pub filtered: usize,
}
964
/// Request body for `DELETE /v1/memories/forget/batch`.
///
/// Unlike [`BatchRecallRequest`], the filter here is mandatory: an empty
/// filter would delete the agent's entire namespace.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetRequest {
    /// Agent whose memory namespace to purge from.
    pub agent_id: String,
    /// Filter predicates — **at least one must be set** (server safety guard).
    pub filter: BatchMemoryFilter,
}
973
974impl BatchForgetRequest {
975    /// Create a new batch forget request with the given filter.
976    pub fn new(agent_id: impl Into<String>, filter: BatchMemoryFilter) -> Self {
977        Self {
978            agent_id: agent_id.into(),
979            filter,
980        }
981    }
982}
983
/// Response from `DELETE /v1/memories/forget/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetResponse {
    /// Number of memories deleted by the batch operation.
    pub deleted_count: usize,
}
989
990// ============================================================================
991// Memory Client Methods
992// ============================================================================
993
994impl DakeraClient {
995    // ========================================================================
996    // Memory Operations
997    // ========================================================================
998
999    /// Store a memory for an agent
1000    ///
1001    /// # Example
1002    ///
1003    /// ```rust,no_run
1004    /// use dakera_client::{DakeraClient, memory::StoreMemoryRequest};
1005    ///
1006    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1007    /// let client = DakeraClient::new("http://localhost:3000")?;
1008    ///
1009    /// let request = StoreMemoryRequest::new("agent-1", "The user prefers dark mode")
1010    ///     .with_importance(0.8)
1011    ///     .with_tags(vec!["preferences".to_string()]);
1012    ///
1013    /// let response = client.store_memory(request).await?;
1014    /// println!("Stored memory: {}", response.memory_id);
1015    /// # Ok(())
1016    /// # }
1017    /// ```
1018    pub async fn store_memory(&self, request: StoreMemoryRequest) -> Result<StoreMemoryResponse> {
1019        let url = format!("{}/v1/memory/store", self.base_url);
1020        let response = self.client.post(&url).json(&request).send().await?;
1021        self.handle_response(response).await
1022    }
1023
1024    /// Recall memories by semantic query
1025    ///
1026    /// # Example
1027    ///
1028    /// ```rust,no_run
1029    /// use dakera_client::{DakeraClient, memory::RecallRequest};
1030    ///
1031    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1032    /// let client = DakeraClient::new("http://localhost:3000")?;
1033    ///
1034    /// let request = RecallRequest::new("agent-1", "user preferences")
1035    ///     .with_top_k(10);
1036    ///
1037    /// let response = client.recall(request).await?;
1038    /// for memory in response.memories {
1039    ///     println!("{}: {} (score: {})", memory.id, memory.content, memory.score);
1040    /// }
1041    /// # Ok(())
1042    /// # }
1043    /// ```
1044    pub async fn recall(&self, request: RecallRequest) -> Result<RecallResponse> {
1045        let url = format!("{}/v1/memory/recall", self.base_url);
1046        let response = self.client.post(&url).json(&request).send().await?;
1047        self.handle_response(response).await
1048    }
1049
1050    /// Simple recall with just agent_id and query (convenience method)
1051    pub async fn recall_simple(
1052        &self,
1053        agent_id: &str,
1054        query: &str,
1055        top_k: usize,
1056    ) -> Result<RecallResponse> {
1057        self.recall(RecallRequest::new(agent_id, query).with_top_k(top_k))
1058            .await
1059    }
1060
1061    /// Get a specific memory by ID
1062    pub async fn get_memory(&self, memory_id: &str) -> Result<RecalledMemory> {
1063        let url = format!("{}/v1/memory/get/{}", self.base_url, memory_id);
1064        let response = self.client.get(&url).send().await?;
1065        self.handle_response(response).await
1066    }
1067
1068    /// Forget (delete) memories
1069    pub async fn forget(&self, request: ForgetRequest) -> Result<ForgetResponse> {
1070        let url = format!("{}/v1/memory/forget", self.base_url);
1071        let response = self.client.post(&url).json(&request).send().await?;
1072        self.handle_response(response).await
1073    }
1074
1075    /// Search memories with advanced filters
1076    pub async fn search_memories(&self, request: RecallRequest) -> Result<RecallResponse> {
1077        let url = format!("{}/v1/memory/search", self.base_url);
1078        let response = self.client.post(&url).json(&request).send().await?;
1079        self.handle_response(response).await
1080    }
1081
1082    /// Update an existing memory
1083    pub async fn update_memory(
1084        &self,
1085        agent_id: &str,
1086        memory_id: &str,
1087        request: UpdateMemoryRequest,
1088    ) -> Result<StoreMemoryResponse> {
1089        let url = format!(
1090            "{}/v1/agents/{}/memories/{}",
1091            self.base_url, agent_id, memory_id
1092        );
1093        let response = self.client.put(&url).json(&request).send().await?;
1094        self.handle_response(response).await
1095    }
1096
1097    /// Update importance of memories
1098    pub async fn update_importance(
1099        &self,
1100        agent_id: &str,
1101        request: UpdateImportanceRequest,
1102    ) -> Result<serde_json::Value> {
1103        let url = format!(
1104            "{}/v1/agents/{}/memories/importance",
1105            self.base_url, agent_id
1106        );
1107        let response = self.client.put(&url).json(&request).send().await?;
1108        self.handle_response(response).await
1109    }
1110
1111    /// Consolidate memories for an agent
1112    pub async fn consolidate(
1113        &self,
1114        agent_id: &str,
1115        request: ConsolidateRequest,
1116    ) -> Result<ConsolidateResponse> {
1117        // Server endpoint: POST /v1/memory/consolidate with agent_id in body
1118        let url = format!("{}/v1/memory/consolidate", self.base_url);
1119        let mut body = serde_json::to_value(&request)?;
1120        body["agent_id"] = serde_json::Value::String(agent_id.to_string());
1121        let response = self.client.post(&url).json(&body).send().await?;
1122        self.handle_response(response).await
1123    }
1124
1125    /// Submit feedback on a memory recall
1126    pub async fn memory_feedback(
1127        &self,
1128        agent_id: &str,
1129        request: FeedbackRequest,
1130    ) -> Result<LegacyFeedbackResponse> {
1131        let url = format!("{}/v1/agents/{}/memories/feedback", self.base_url, agent_id);
1132        let response = self.client.post(&url).json(&request).send().await?;
1133        self.handle_response(response).await
1134    }
1135
1136    // ========================================================================
1137    // Memory Feedback Loop — INT-1
1138    // ========================================================================
1139
1140    /// Submit upvote/downvote/flag feedback on a memory (INT-1).
1141    ///
1142    /// # Arguments
1143    /// * `memory_id` – The memory to give feedback on.
1144    /// * `agent_id` – The agent that owns the memory.
1145    /// * `signal` – [`FeedbackSignal`] value: `Upvote`, `Downvote`, or `Flag`.
1146    ///
1147    /// # Example
1148    /// ```no_run
1149    /// # use dakera_client::{DakeraClient, FeedbackSignal};
1150    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1151    /// let resp = client.feedback_memory("mem-abc", "agent-1", FeedbackSignal::Upvote).await?;
1152    /// println!("new importance: {}", resp.new_importance);
1153    /// # Ok(()) }
1154    /// ```
1155    pub async fn feedback_memory(
1156        &self,
1157        memory_id: &str,
1158        agent_id: &str,
1159        signal: FeedbackSignal,
1160    ) -> Result<FeedbackResponse> {
1161        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1162        let body = MemoryFeedbackBody {
1163            agent_id: agent_id.to_string(),
1164            signal,
1165        };
1166        let response = self.client.post(&url).json(&body).send().await?;
1167        self.handle_response(response).await
1168    }
1169
1170    /// Get the full feedback history for a memory (INT-1).
1171    pub async fn get_memory_feedback_history(
1172        &self,
1173        memory_id: &str,
1174    ) -> Result<FeedbackHistoryResponse> {
1175        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1176        let response = self.client.get(&url).send().await?;
1177        self.handle_response(response).await
1178    }
1179
1180    /// Get aggregate feedback counts and health score for an agent (INT-1).
1181    pub async fn get_agent_feedback_summary(&self, agent_id: &str) -> Result<AgentFeedbackSummary> {
1182        let url = format!("{}/v1/agents/{}/feedback/summary", self.base_url, agent_id);
1183        let response = self.client.get(&url).send().await?;
1184        self.handle_response(response).await
1185    }
1186
1187    /// Directly override a memory's importance score (INT-1).
1188    ///
1189    /// # Arguments
1190    /// * `memory_id` – The memory to update.
1191    /// * `agent_id` – The agent that owns the memory.
1192    /// * `importance` – New importance value (0.0–1.0).
1193    pub async fn patch_memory_importance(
1194        &self,
1195        memory_id: &str,
1196        agent_id: &str,
1197        importance: f32,
1198    ) -> Result<FeedbackResponse> {
1199        let url = format!("{}/v1/memories/{}/importance", self.base_url, memory_id);
1200        let body = MemoryImportancePatch {
1201            agent_id: agent_id.to_string(),
1202            importance,
1203        };
1204        let response = self.client.patch(&url).json(&body).send().await?;
1205        self.handle_response(response).await
1206    }
1207
1208    /// Get overall feedback health score for an agent (INT-1).
1209    ///
1210    /// The health score is the mean importance of all non-expired memories (0.0–1.0).
1211    /// A higher score indicates a healthier, more relevant memory store.
1212    pub async fn get_feedback_health(&self, agent_id: &str) -> Result<FeedbackHealthResponse> {
1213        let url = format!("{}/v1/feedback/health?agent_id={}", self.base_url, agent_id);
1214        let response = self.client.get(&url).send().await?;
1215        self.handle_response(response).await
1216    }
1217
1218    // ========================================================================
1219    // Memory Knowledge Graph Operations (CE-5 / SDK-9)
1220    // ========================================================================
1221
1222    /// Traverse the knowledge graph from a memory node.
1223    ///
1224    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1225    ///
1226    /// # Arguments
1227    /// * `memory_id` – Root memory ID to start traversal from.
1228    /// * `options` – Traversal options (depth, edge type filters).
1229    ///
1230    /// # Example
1231    /// ```no_run
1232    /// # use dakera_client::{DakeraClient, GraphOptions};
1233    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1234    /// let graph = client.memory_graph("mem-abc", GraphOptions::new().depth(2)).await?;
1235    /// println!("{} nodes, {} edges", graph.nodes.len(), graph.edges.len());
1236    /// # Ok(()) }
1237    /// ```
1238    pub async fn memory_graph(
1239        &self,
1240        memory_id: &str,
1241        options: GraphOptions,
1242    ) -> Result<MemoryGraph> {
1243        let mut url = format!("{}/v1/memories/{}/graph", self.base_url, memory_id);
1244        let depth = options.depth.unwrap_or(1);
1245        url.push_str(&format!("?depth={}", depth));
1246        if let Some(types) = &options.types {
1247            let type_strs: Vec<String> = types
1248                .iter()
1249                .map(|t| {
1250                    serde_json::to_value(t)
1251                        .unwrap()
1252                        .as_str()
1253                        .unwrap_or("")
1254                        .to_string()
1255                })
1256                .collect();
1257            if !type_strs.is_empty() {
1258                url.push_str(&format!("&types={}", type_strs.join(",")));
1259            }
1260        }
1261        let response = self.client.get(&url).send().await?;
1262        self.handle_response(response).await
1263    }
1264
1265    /// Find the shortest path between two memories in the knowledge graph.
1266    ///
1267    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1268    ///
1269    /// # Example
1270    /// ```no_run
1271    /// # use dakera_client::DakeraClient;
1272    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1273    /// let path = client.memory_path("mem-abc", "mem-xyz").await?;
1274    /// println!("{} hops: {:?}", path.hops, path.path);
1275    /// # Ok(()) }
1276    /// ```
1277    pub async fn memory_path(&self, source_id: &str, target_id: &str) -> Result<GraphPath> {
1278        let url = format!(
1279            "{}/v1/memories/{}/path?target={}",
1280            self.base_url,
1281            source_id,
1282            urlencoding::encode(target_id)
1283        );
1284        let response = self.client.get(&url).send().await?;
1285        self.handle_response(response).await
1286    }
1287
1288    /// Create an explicit edge between two memories.
1289    ///
1290    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1291    ///
1292    /// # Example
1293    /// ```no_run
1294    /// # use dakera_client::{DakeraClient, EdgeType};
1295    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1296    /// let resp = client.memory_link("mem-abc", "mem-xyz", EdgeType::LinkedBy).await?;
1297    /// println!("Created edge: {}", resp.edge.id);
1298    /// # Ok(()) }
1299    /// ```
1300    pub async fn memory_link(
1301        &self,
1302        source_id: &str,
1303        target_id: &str,
1304        edge_type: EdgeType,
1305    ) -> Result<GraphLinkResponse> {
1306        let url = format!("{}/v1/memories/{}/links", self.base_url, source_id);
1307        let request = GraphLinkRequest {
1308            target_id: target_id.to_string(),
1309            edge_type,
1310        };
1311        let response = self.client.post(&url).json(&request).send().await?;
1312        self.handle_response(response).await
1313    }
1314
1315    /// Export the full knowledge graph for an agent.
1316    ///
1317    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1318    ///
1319    /// # Arguments
1320    /// * `agent_id` – Agent whose graph to export.
1321    /// * `format` – Export format: `"json"` (default), `"graphml"`, or `"csv"`.
1322    pub async fn agent_graph_export(&self, agent_id: &str, format: &str) -> Result<GraphExport> {
1323        let url = format!(
1324            "{}/v1/agents/{}/graph/export?format={}",
1325            self.base_url, agent_id, format
1326        );
1327        let response = self.client.get(&url).send().await?;
1328        self.handle_response(response).await
1329    }
1330
1331    // ========================================================================
1332    // Session Operations
1333    // ========================================================================
1334
1335    /// Start a new session for an agent
1336    pub async fn start_session(&self, agent_id: &str) -> Result<Session> {
1337        let url = format!("{}/v1/sessions/start", self.base_url);
1338        let request = SessionStartRequest {
1339            agent_id: agent_id.to_string(),
1340            metadata: None,
1341        };
1342        let response = self.client.post(&url).json(&request).send().await?;
1343        let resp: SessionStartResponse = self.handle_response(response).await?;
1344        Ok(resp.session)
1345    }
1346
1347    /// Start a session with metadata
1348    pub async fn start_session_with_metadata(
1349        &self,
1350        agent_id: &str,
1351        metadata: serde_json::Value,
1352    ) -> Result<Session> {
1353        let url = format!("{}/v1/sessions/start", self.base_url);
1354        let request = SessionStartRequest {
1355            agent_id: agent_id.to_string(),
1356            metadata: Some(metadata),
1357        };
1358        let response = self.client.post(&url).json(&request).send().await?;
1359        let resp: SessionStartResponse = self.handle_response(response).await?;
1360        Ok(resp.session)
1361    }
1362
1363    /// End a session, optionally with a summary.
1364    /// Returns the session state and the total memory count at close.
1365    pub async fn end_session(
1366        &self,
1367        session_id: &str,
1368        summary: Option<String>,
1369    ) -> Result<SessionEndResponse> {
1370        let url = format!("{}/v1/sessions/{}/end", self.base_url, session_id);
1371        let request = SessionEndRequest { summary };
1372        let response = self.client.post(&url).json(&request).send().await?;
1373        self.handle_response(response).await
1374    }
1375
1376    /// Get a session by ID
1377    pub async fn get_session(&self, session_id: &str) -> Result<Session> {
1378        let url = format!("{}/v1/sessions/{}", self.base_url, session_id);
1379        let response = self.client.get(&url).send().await?;
1380        self.handle_response(response).await
1381    }
1382
1383    /// List sessions for an agent
1384    pub async fn list_sessions(&self, agent_id: &str) -> Result<Vec<Session>> {
1385        let url = format!("{}/v1/sessions?agent_id={}", self.base_url, agent_id);
1386        let response = self.client.get(&url).send().await?;
1387        self.handle_response(response).await
1388    }
1389
1390    /// Get memories in a session
1391    pub async fn session_memories(&self, session_id: &str) -> Result<RecallResponse> {
1392        let url = format!("{}/v1/sessions/{}/memories", self.base_url, session_id);
1393        let response = self.client.get(&url).send().await?;
1394        self.handle_response(response).await
1395    }
1396
1397    // ========================================================================
1398    // CE-2: Batch Recall / Forget
1399    // ========================================================================
1400
1401    /// Bulk-recall memories using filter predicates (CE-2).
1402    ///
1403    /// Uses `POST /v1/memories/recall/batch` — no embedding required.
1404    ///
1405    /// # Example
1406    ///
1407    /// ```rust,no_run
1408    /// use dakera_client::{DakeraClient, memory::{BatchRecallRequest, BatchMemoryFilter}};
1409    ///
1410    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1411    /// let client = DakeraClient::new("http://localhost:3000")?;
1412    ///
1413    /// let filter = BatchMemoryFilter::default().with_min_importance(0.7);
1414    /// let req = BatchRecallRequest::new("agent-1").with_filter(filter).with_limit(50);
1415    /// let resp = client.batch_recall(req).await?;
1416    /// println!("Found {} memories", resp.filtered);
1417    /// # Ok(())
1418    /// # }
1419    /// ```
1420    pub async fn batch_recall(&self, request: BatchRecallRequest) -> Result<BatchRecallResponse> {
1421        let url = format!("{}/v1/memories/recall/batch", self.base_url);
1422        let response = self.client.post(&url).json(&request).send().await?;
1423        self.handle_response(response).await
1424    }
1425
1426    /// Bulk-delete memories using filter predicates (CE-2).
1427    ///
1428    /// Uses `DELETE /v1/memories/forget/batch`.  The server requires at least
1429    /// one filter predicate to be set as a safety guard.
1430    ///
1431    /// # Example
1432    ///
1433    /// ```rust,no_run
1434    /// use dakera_client::{DakeraClient, memory::{BatchForgetRequest, BatchMemoryFilter}};
1435    ///
1436    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1437    /// let client = DakeraClient::new("http://localhost:3000")?;
1438    ///
1439    /// let filter = BatchMemoryFilter::default().with_min_importance(0.0).with_max_importance(0.2);
1440    /// let resp = client.batch_forget(BatchForgetRequest::new("agent-1", filter)).await?;
1441    /// println!("Deleted {} memories", resp.deleted_count);
1442    /// # Ok(())
1443    /// # }
1444    /// ```
1445    pub async fn batch_forget(&self, request: BatchForgetRequest) -> Result<BatchForgetResponse> {
1446        let url = format!("{}/v1/memories/forget/batch", self.base_url);
1447        let response = self.client.delete(&url).json(&request).send().await?;
1448        self.handle_response(response).await
1449    }
1450
1451    // ========================================================================
1452    // DX-1: Memory Import / Export
1453    // ========================================================================
1454
1455    /// Import memories from an external format (DX-1).
1456    ///
1457    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1458    ///
1459    /// ```no_run
1460    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1461    /// let client = dakera_client::DakeraClient::new("http://localhost:3000")?;
1462    /// let data = serde_json::json!([{"content": "hello", "agent_id": "agent-1"}]);
1463    /// let resp = client.import_memories(data, "jsonl", None, None).await?;
1464    /// println!("Imported {} memories", resp.imported_count);
1465    /// # Ok(())
1466    /// # }
1467    /// ```
1468    pub async fn import_memories(
1469        &self,
1470        data: serde_json::Value,
1471        format: &str,
1472        agent_id: Option<&str>,
1473        namespace: Option<&str>,
1474    ) -> Result<MemoryImportResponse> {
1475        let mut body = serde_json::json!({"data": data, "format": format});
1476        if let Some(aid) = agent_id {
1477            body["agent_id"] = serde_json::Value::String(aid.to_string());
1478        }
1479        if let Some(ns) = namespace {
1480            body["namespace"] = serde_json::Value::String(ns.to_string());
1481        }
1482        let url = format!("{}/v1/import", self.base_url);
1483        let response = self.client.post(&url).json(&body).send().await?;
1484        self.handle_response(response).await
1485    }
1486
1487    /// Export memories in a portable format (DX-1).
1488    ///
1489    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1490    pub async fn export_memories(
1491        &self,
1492        format: &str,
1493        agent_id: Option<&str>,
1494        namespace: Option<&str>,
1495        limit: Option<u32>,
1496    ) -> Result<MemoryExportResponse> {
1497        let mut params = vec![("format", format.to_string())];
1498        if let Some(aid) = agent_id {
1499            params.push(("agent_id", aid.to_string()));
1500        }
1501        if let Some(ns) = namespace {
1502            params.push(("namespace", ns.to_string()));
1503        }
1504        if let Some(l) = limit {
1505            params.push(("limit", l.to_string()));
1506        }
1507        let url = format!("{}/v1/export", self.base_url);
1508        let response = self.client.get(&url).query(&params).send().await?;
1509        self.handle_response(response).await
1510    }
1511
1512    // ========================================================================
1513    // OBS-1: Business-Event Audit Log
1514    // ========================================================================
1515
1516    /// List paginated audit log entries (OBS-1).
1517    pub async fn list_audit_events(&self, query: AuditQuery) -> Result<AuditListResponse> {
1518        let url = format!("{}/v1/audit", self.base_url);
1519        let response = self.client.get(&url).query(&query).send().await?;
1520        self.handle_response(response).await
1521    }
1522
1523    /// Stream live audit events via SSE (OBS-1).
1524    ///
1525    /// Returns a [`tokio::sync::mpsc::Receiver`] that yields [`DakeraEvent`] results.
1526    pub async fn stream_audit_events(
1527        &self,
1528        agent_id: Option<&str>,
1529        event_type: Option<&str>,
1530    ) -> Result<tokio::sync::mpsc::Receiver<Result<crate::events::DakeraEvent>>> {
1531        let mut params: Vec<(&str, String)> = Vec::new();
1532        if let Some(aid) = agent_id {
1533            params.push(("agent_id", aid.to_string()));
1534        }
1535        if let Some(et) = event_type {
1536            params.push(("event_type", et.to_string()));
1537        }
1538        let base = format!("{}/v1/audit/stream", self.base_url);
1539        let url = if params.is_empty() {
1540            base
1541        } else {
1542            let qs = params
1543                .iter()
1544                .map(|(k, v)| format!("{}={}", k, urlencoding::encode(v)))
1545                .collect::<Vec<_>>()
1546                .join("&");
1547            format!("{}?{}", base, qs)
1548        };
1549        self.stream_sse(url).await
1550    }
1551
1552    /// Bulk-export audit log entries (OBS-1).
1553    pub async fn export_audit(
1554        &self,
1555        format: &str,
1556        agent_id: Option<&str>,
1557        event_type: Option<&str>,
1558        from_ts: Option<u64>,
1559        to_ts: Option<u64>,
1560    ) -> Result<AuditExportResponse> {
1561        let mut body = serde_json::json!({"format": format});
1562        if let Some(aid) = agent_id {
1563            body["agent_id"] = serde_json::Value::String(aid.to_string());
1564        }
1565        if let Some(et) = event_type {
1566            body["event_type"] = serde_json::Value::String(et.to_string());
1567        }
1568        if let Some(f) = from_ts {
1569            body["from"] = serde_json::Value::Number(f.into());
1570        }
1571        if let Some(t) = to_ts {
1572            body["to"] = serde_json::Value::Number(t.into());
1573        }
1574        let url = format!("{}/v1/audit/export", self.base_url);
1575        let response = self.client.post(&url).json(&body).send().await?;
1576        self.handle_response(response).await
1577    }
1578
1579    // ========================================================================
1580    // EXT-1: External Extraction Providers
1581    // ========================================================================
1582
1583    /// Extract entities from text using a pluggable provider (EXT-1).
1584    ///
1585    /// Provider hierarchy: per-request > namespace default > GLiNER (bundled).
1586    /// Supported providers: `"gliner"`, `"openai"`, `"anthropic"`, `"openrouter"`, `"ollama"`.
1587    pub async fn extract_text(
1588        &self,
1589        text: &str,
1590        namespace: Option<&str>,
1591        provider: Option<&str>,
1592        model: Option<&str>,
1593    ) -> Result<ExtractionResult> {
1594        let mut body = serde_json::json!({"text": text});
1595        if let Some(ns) = namespace {
1596            body["namespace"] = serde_json::Value::String(ns.to_string());
1597        }
1598        if let Some(p) = provider {
1599            body["provider"] = serde_json::Value::String(p.to_string());
1600        }
1601        if let Some(m) = model {
1602            body["model"] = serde_json::Value::String(m.to_string());
1603        }
1604        let url = format!("{}/v1/extract", self.base_url);
1605        let response = self.client.post(&url).json(&body).send().await?;
1606        self.handle_response(response).await
1607    }
1608
1609    /// List available extraction providers and their models (EXT-1).
1610    pub async fn list_extract_providers(&self) -> Result<Vec<ExtractionProviderInfo>> {
1611        let url = format!("{}/v1/extract/providers", self.base_url);
1612        let response = self.client.get(&url).send().await?;
1613        let result: ExtractProvidersResponse = self.handle_response(response).await?;
1614        Ok(match result {
1615            ExtractProvidersResponse::List(v) => v,
1616            ExtractProvidersResponse::Object { providers } => providers,
1617        })
1618    }
1619
1620    /// Set the default extraction provider for a namespace (EXT-1).
1621    pub async fn configure_namespace_extractor(
1622        &self,
1623        namespace: &str,
1624        provider: &str,
1625        model: Option<&str>,
1626    ) -> Result<serde_json::Value> {
1627        let mut body = serde_json::json!({"provider": provider});
1628        if let Some(m) = model {
1629            body["model"] = serde_json::Value::String(m.to_string());
1630        }
1631        let url = format!(
1632            "{}/v1/namespaces/{}/extractor",
1633            self.base_url,
1634            urlencoding::encode(namespace)
1635        );
1636        let response = self.client.patch(&url).json(&body).send().await?;
1637        self.handle_response(response).await
1638    }
1639
1640    // =========================================================================
1641    // SEC-3: AES-256-GCM Encryption Key Rotation
1642    // =========================================================================
1643
1644    /// Re-encrypt all memory content blobs with a new AES-256-GCM key (SEC-3).
1645    ///
1646    /// After this call the new key is active in the running process.
1647    /// The operator must update `DAKERA_ENCRYPTION_KEY` and restart to make
1648    /// the rotation durable across restarts.
1649    ///
1650    /// Requires Admin scope.
1651    ///
1652    /// # Arguments
1653    /// * `new_key` - New passphrase or 64-char hex key.
1654    /// * `namespace` - If `Some`, rotate only this namespace. `None` rotates all.
1655    pub async fn rotate_encryption_key(
1656        &self,
1657        new_key: &str,
1658        namespace: Option<&str>,
1659    ) -> Result<RotateEncryptionKeyResponse> {
1660        let body = RotateEncryptionKeyRequest {
1661            new_key: new_key.to_string(),
1662            namespace: namespace.map(|s| s.to_string()),
1663        };
1664        let url = format!("{}/v1/admin/encryption/rotate-key", self.base_url);
1665        let response = self.client.post(&url).json(&body).send().await?;
1666        self.handle_response(response).await
1667    }
1668}