// dakera_client/memory.rs
1//! Memory-oriented client methods for Dakera AI Agent Memory Platform
2//!
3//! Provides high-level methods for storing, recalling, and managing
4//! agent memories and sessions through the Dakera API.
5
6use serde::{Deserialize, Serialize};
7
8use crate::error::Result;
9use crate::types::{
10    AgentFeedbackSummary, EdgeType, FeedbackHealthResponse, FeedbackHistoryResponse,
11    FeedbackResponse, FeedbackSignal, GraphExport, GraphLinkRequest, GraphLinkResponse,
12    GraphOptions, GraphPath, MemoryFeedbackBody, MemoryGraph, MemoryImportancePatch,
13};
14use crate::DakeraClient;
15
16// ============================================================================
17// Memory Types (client-side)
18// ============================================================================
19
/// Memory type classification
///
/// Serialized in lowercase (e.g. `"episodic"`); `Episodic` is the default
/// when a request omits the field.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum MemoryType {
    /// Event-like memory tied to a point in time (default).
    #[default]
    Episodic,
    /// Facts and general knowledge.
    Semantic,
    /// How-to / skill knowledge.
    Procedural,
    /// Short-lived working context.
    Working,
}
30
/// Store a memory request
///
/// Body for the memory-store endpoint. Prefer [`StoreMemoryRequest::new`]
/// plus the `with_*` builder methods over filling fields by hand.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoreMemoryRequest {
    /// Agent that owns the memory.
    pub agent_id: String,
    /// Free-text content of the memory.
    pub content: String,
    /// Classification; defaults to `Episodic` when omitted.
    #[serde(default)]
    pub memory_type: MemoryType,
    /// Importance score; defaults to 0.5 when omitted (see `default_importance`).
    #[serde(default = "default_importance")]
    pub importance: f32,
    /// Tags for later filtering; defaults to empty.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Session this memory belongs to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Arbitrary JSON metadata attached to the memory.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Optional TTL in seconds. The memory is hard-deleted after this many
    /// seconds from creation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ttl_seconds: Option<u64>,
    /// Optional explicit expiry as a Unix timestamp (seconds). Takes precedence
    /// over `ttl_seconds` when both are set. The memory is hard-deleted by the
    /// decay engine on expiry (DECAY-3).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expires_at: Option<u64>,
}

/// Serde default for [`StoreMemoryRequest::importance`].
fn default_importance() -> f32 {
    0.5
}
60
61impl StoreMemoryRequest {
62    /// Create a new store memory request
63    pub fn new(agent_id: impl Into<String>, content: impl Into<String>) -> Self {
64        Self {
65            agent_id: agent_id.into(),
66            content: content.into(),
67            memory_type: MemoryType::default(),
68            importance: 0.5,
69            tags: Vec::new(),
70            session_id: None,
71            metadata: None,
72            ttl_seconds: None,
73            expires_at: None,
74        }
75    }
76
77    /// Set memory type
78    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
79        self.memory_type = memory_type;
80        self
81    }
82
83    /// Set importance score
84    pub fn with_importance(mut self, importance: f32) -> Self {
85        self.importance = importance.clamp(0.0, 1.0);
86        self
87    }
88
89    /// Set tags
90    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
91        self.tags = tags;
92        self
93    }
94
95    /// Set session ID
96    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
97        self.session_id = Some(session_id.into());
98        self
99    }
100
101    /// Set metadata
102    pub fn with_metadata(mut self, metadata: serde_json::Value) -> Self {
103        self.metadata = Some(metadata);
104        self
105    }
106
107    /// Set TTL in seconds. The memory is hard-deleted after this many seconds
108    /// from creation.
109    pub fn with_ttl(mut self, ttl_seconds: u64) -> Self {
110        self.ttl_seconds = Some(ttl_seconds);
111        self
112    }
113
114    /// Set an explicit expiry Unix timestamp (seconds). Takes precedence over
115    /// `ttl_seconds` when both are set (DECAY-3).
116    pub fn with_expires_at(mut self, expires_at: u64) -> Self {
117        self.expires_at = Some(expires_at);
118        self
119    }
120}
121
/// Stored memory response from `POST /v1/memory/store`.
///
/// The server wraps the memory in a nested `memory` object:
/// `{"memory": {"id": "...", "agent_id": "...", ...}, "embedding_time_ms": N}`.
/// The `memory_id` and `agent_id` fields are convenience accessors mapped from
/// `memory.id` and `memory.agent_id` respectively.
///
/// `Deserialize` is implemented by hand (below) to support both the nested
/// server format and the legacy flat format.
#[derive(Debug, Clone, Serialize)]
pub struct StoreMemoryResponse {
    /// Memory ID (mapped from `memory.id`)
    pub memory_id: String,
    /// Agent ID (mapped from `memory.agent_id`; empty string when absent)
    pub agent_id: String,
    /// Namespace (mapped from `memory.namespace`, defaults to `"default"`)
    pub namespace: String,
    /// Embedding latency in milliseconds (absent in the legacy format)
    pub embedding_time_ms: Option<u64>,
}
139
140impl<'de> serde::Deserialize<'de> for StoreMemoryResponse {
141    fn deserialize<D: serde::Deserializer<'de>>(
142        deserializer: D,
143    ) -> std::result::Result<Self, D::Error> {
144        use serde::de::Error;
145        let val = serde_json::Value::deserialize(deserializer)?;
146
147        // Server response: {"memory": {"id":"...","agent_id":"...",...}, "embedding_time_ms": N}
148        if let Some(memory) = val.get("memory") {
149            let memory_id = memory
150                .get("id")
151                .and_then(|v| v.as_str())
152                .ok_or_else(|| D::Error::missing_field("memory.id"))?
153                .to_string();
154            let agent_id = memory
155                .get("agent_id")
156                .and_then(|v| v.as_str())
157                .unwrap_or("")
158                .to_string();
159            let namespace = memory
160                .get("namespace")
161                .and_then(|v| v.as_str())
162                .unwrap_or("default")
163                .to_string();
164            let embedding_time_ms = val.get("embedding_time_ms").and_then(|v| v.as_u64());
165            return Ok(Self {
166                memory_id,
167                agent_id,
168                namespace,
169                embedding_time_ms,
170            });
171        }
172
173        // Legacy / mock format: {"memory_id":"...","agent_id":"...","namespace":"..."}
174        let memory_id = val
175            .get("memory_id")
176            .and_then(|v| v.as_str())
177            .ok_or_else(|| D::Error::missing_field("memory_id"))?
178            .to_string();
179        let agent_id = val
180            .get("agent_id")
181            .and_then(|v| v.as_str())
182            .unwrap_or("")
183            .to_string();
184        let namespace = val
185            .get("namespace")
186            .and_then(|v| v.as_str())
187            .unwrap_or("default")
188            .to_string();
189        Ok(Self {
190            memory_id,
191            agent_id,
192            namespace,
193            embedding_time_ms: None,
194        })
195    }
196}
197
/// Fusion strategy for hybrid recall (CE-14).
///
/// Controls how vector and BM25 scores are combined when `routing = Hybrid`.
/// `Rrf` (default) uses Reciprocal Rank Fusion (Cormack et al., SIGIR 2009).
/// Serialized in snake_case; `MinMax` is renamed to `"minmax"` on the wire.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum FusionStrategy {
    /// Reciprocal Rank Fusion — default, best for recall tasks.
    /// Formula: score(d) = Σ 1 / (k + rank(d)), k = 60.
    #[default]
    Rrf,
    /// Legacy weighted min-max normalization.
    #[serde(rename = "minmax")]
    MinMax,
}
213
214/// Retrieval routing mode for recall and search (CE-10).
215///
216/// Controls which retrieval index the server uses. `Auto` (default) lets the
217/// server pick the best strategy based on the query.
218#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
219#[serde(rename_all = "snake_case")]
220pub enum RoutingMode {
221    /// Server picks the best strategy (default).
222    Auto,
223    /// Force ANN vector search (HNSW).
224    Vector,
225    /// Force BM25 full-text search.
226    Bm25,
227    /// Fuse ANN and BM25 scores (RRF).
228    Hybrid,
229}
230
/// Recall memories request
///
/// Body for the recall endpoint. Prefer [`RecallRequest::new`] plus the
/// `with_*` builder methods over filling fields by hand.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallRequest {
    /// Agent whose memories to search.
    pub agent_id: String,
    /// Free-text query.
    pub query: String,
    /// Number of results to return; defaults to 5 when omitted.
    #[serde(default = "default_top_k")]
    pub top_k: usize,
    /// Restrict results to a single memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Minimum importance threshold; defaults to 0.0 (no filtering).
    #[serde(default)]
    pub min_importance: f32,
    /// Restrict results to a single session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Restrict results to memories carrying these tags.
    #[serde(default)]
    pub tags: Vec<String>,
    /// COG-2: traverse KG depth-1 from recalled memories and include
    /// associatively linked memories in the response (default: false)
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub include_associated: bool,
    /// COG-2: max associated memories to return (default: 10, max: 10)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_cap: Option<u32>,
    /// KG-3: KG traversal depth 1–3 (default: 1); requires include_associated
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_depth: Option<u8>,
    /// KG-3: minimum edge weight for KG traversal (default: 0.0)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_min_weight: Option<f32>,
    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub since: Option<String>,
    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub until: Option<String>,
    /// CE-10: retrieval routing mode. `None` uses the server default (`auto`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub routing: Option<RoutingMode>,
    /// CE-13: cross-encoder reranking. `None` uses server default (`true` for recall,
    /// `false` for search). Set to `Some(false)` to disable on latency-sensitive paths.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rerank: Option<bool>,
    /// CE-14: fusion strategy when `routing = Hybrid`. `None` uses server default (`Rrf`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fusion: Option<FusionStrategy>,
    /// v0.11.0: fetch session-adjacent memories within ±5 min of each top result.
    /// `None` uses server default (`true`). Set to `Some(false)` to disable for
    /// latency-sensitive paths.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub neighborhood: Option<bool>,
}

/// Serde default for [`RecallRequest::top_k`].
fn default_top_k() -> usize {
    5
}
285
286impl RecallRequest {
287    /// Create a new recall request
288    pub fn new(agent_id: impl Into<String>, query: impl Into<String>) -> Self {
289        Self {
290            agent_id: agent_id.into(),
291            query: query.into(),
292            top_k: 5,
293            memory_type: None,
294            min_importance: 0.0,
295            session_id: None,
296            tags: Vec::new(),
297            include_associated: false,
298            associated_memories_cap: None,
299            associated_memories_depth: None,
300            associated_memories_min_weight: None,
301            since: None,
302            until: None,
303            routing: None,
304            rerank: None,
305            fusion: None,
306            neighborhood: None,
307        }
308    }
309
310    /// Set number of results
311    pub fn with_top_k(mut self, top_k: usize) -> Self {
312        self.top_k = top_k;
313        self
314    }
315
316    /// Filter by memory type
317    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
318        self.memory_type = Some(memory_type);
319        self
320    }
321
322    /// Set minimum importance threshold
323    pub fn with_min_importance(mut self, min: f32) -> Self {
324        self.min_importance = min;
325        self
326    }
327
328    /// Filter by session
329    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
330        self.session_id = Some(session_id.into());
331        self
332    }
333
334    /// Filter by tags
335    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
336        self.tags = tags;
337        self
338    }
339
340    /// COG-2: include KG depth-1 associated memories in the response
341    pub fn with_associated(mut self) -> Self {
342        self.include_associated = true;
343        self
344    }
345
346    /// COG-2: set max associated memories cap (default: 10, max: 10)
347    pub fn with_associated_cap(mut self, cap: u32) -> Self {
348        self.include_associated = true;
349        self.associated_memories_cap = Some(cap);
350        self
351    }
352
353    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
354    pub fn with_since(mut self, since: impl Into<String>) -> Self {
355        self.since = Some(since.into());
356        self
357    }
358
359    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
360    pub fn with_until(mut self, until: impl Into<String>) -> Self {
361        self.until = Some(until.into());
362        self
363    }
364
365    /// CE-10: set retrieval routing mode
366    pub fn with_routing(mut self, routing: RoutingMode) -> Self {
367        self.routing = Some(routing);
368        self
369    }
370
371    /// CE-13: enable or disable cross-encoder reranking (server default: true for recall)
372    pub fn with_rerank(mut self, rerank: bool) -> Self {
373        self.rerank = Some(rerank);
374        self
375    }
376
377    /// KG-3: set KG traversal depth (1–3, default: 1); implies include_associated
378    pub fn with_associated_depth(mut self, depth: u8) -> Self {
379        self.include_associated = true;
380        self.associated_memories_depth = Some(depth);
381        self
382    }
383
384    /// KG-3: set minimum edge weight for KG traversal (default: 0.0)
385    pub fn with_associated_min_weight(mut self, weight: f32) -> Self {
386        self.associated_memories_min_weight = Some(weight);
387        self
388    }
389
390    /// CE-14: set fusion strategy for hybrid recall (server default: `Rrf`)
391    pub fn with_fusion(mut self, fusion: FusionStrategy) -> Self {
392        self.fusion = Some(fusion);
393        self
394    }
395
396    /// v0.11.0: enable or disable session-adjacent neighborhood enrichment
397    /// (server default: `true`). Set to `false` for latency-sensitive paths.
398    pub fn with_neighborhood(mut self, neighborhood: bool) -> Self {
399        self.neighborhood = Some(neighborhood);
400        self
401    }
402}
403
/// A recalled memory
///
/// `Deserialize` is implemented by hand (below) to unwrap the server's
/// `{memory: {...}, score, ...}` envelope. The serde field attributes here
/// therefore only matter for the derived `Serialize`; `#[serde(default)]`
/// is a no-op for serialization and is kept for documentation value.
#[derive(Debug, Clone, Serialize)]
pub struct RecalledMemory {
    /// Memory ID.
    pub id: String,
    /// Free-text content of the memory.
    pub content: String,
    /// Classification (falls back to `Episodic` when absent in the response).
    pub memory_type: MemoryType,
    /// Importance score (falls back to 0.5 when absent).
    pub importance: f32,
    /// Retrieval score from the recall envelope (0.0 when absent).
    pub score: f32,
    /// Tags attached to the memory.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Session the memory belongs to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Arbitrary JSON metadata attached to the memory.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Creation time as a Unix timestamp (0 when absent).
    pub created_at: u64,
    /// Last-access time as a Unix timestamp (0 when absent).
    pub last_accessed_at: u64,
    /// Number of times the memory has been accessed.
    pub access_count: u32,
    /// KG-3: hop depth at which this memory was found (only set on associated memories)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub depth: Option<u8>,
}
425
426impl<'de> serde::Deserialize<'de> for RecalledMemory {
427    fn deserialize<D: serde::Deserializer<'de>>(
428        deserializer: D,
429    ) -> std::result::Result<Self, D::Error> {
430        use serde::de::Error as _;
431        let val = serde_json::Value::deserialize(deserializer)?;
432
433        // Server wraps recall results as {memory:{...}, score, weighted_score, smart_score}.
434        // Fall back to flat format for direct memory-get responses.
435        let score = val
436            .get("score")
437            .and_then(|v| v.as_f64())
438            .or_else(|| val.get("weighted_score").and_then(|v| v.as_f64()))
439            .unwrap_or(0.0) as f32;
440
441        let mem = val.get("memory").unwrap_or(&val);
442
443        let id = mem
444            .get("id")
445            .and_then(|v| v.as_str())
446            .ok_or_else(|| D::Error::missing_field("id"))?
447            .to_string();
448        let content = mem
449            .get("content")
450            .and_then(|v| v.as_str())
451            .ok_or_else(|| D::Error::missing_field("content"))?
452            .to_string();
453        let memory_type: MemoryType = mem
454            .get("memory_type")
455            .and_then(|v| serde_json::from_value(v.clone()).ok())
456            .unwrap_or(MemoryType::Episodic);
457        let importance = mem
458            .get("importance")
459            .and_then(|v| v.as_f64())
460            .unwrap_or(0.5) as f32;
461        let tags: Vec<String> = mem
462            .get("tags")
463            .and_then(|v| serde_json::from_value(v.clone()).ok())
464            .unwrap_or_default();
465        let session_id = mem
466            .get("session_id")
467            .and_then(|v| v.as_str())
468            .map(String::from);
469        let metadata = mem.get("metadata").cloned().filter(|v| !v.is_null());
470        let created_at = mem.get("created_at").and_then(|v| v.as_u64()).unwrap_or(0);
471        let last_accessed_at = mem
472            .get("last_accessed_at")
473            .and_then(|v| v.as_u64())
474            .unwrap_or(0);
475        let access_count = mem
476            .get("access_count")
477            .and_then(|v| v.as_u64())
478            .unwrap_or(0) as u32;
479        let depth = mem.get("depth").and_then(|v| v.as_u64()).map(|v| v as u8);
480
481        Ok(Self {
482            id,
483            content,
484            memory_type,
485            importance,
486            score,
487            tags,
488            session_id,
489            metadata,
490            created_at,
491            last_accessed_at,
492            access_count,
493            depth,
494        })
495    }
496}
497
/// Recall response
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallResponse {
    /// Recalled memories, ranked by score.
    pub memories: Vec<RecalledMemory>,
    /// Total number of matches found server-side (0 when absent).
    #[serde(default)]
    pub total_found: usize,
    /// COG-2 / KG-3: KG associated memories at configurable depth (only present when include_associated was true)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories: Option<Vec<RecalledMemory>>,
}

/// Forget (delete) memories request
///
/// Use the [`ForgetRequest::by_ids`] / [`by_tags`](ForgetRequest::by_tags) /
/// [`by_session`](ForgetRequest::by_session) constructors for the common cases.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetRequest {
    /// Agent whose memories to delete.
    pub agent_id: String,
    /// Delete these specific memory IDs.
    #[serde(default)]
    pub memory_ids: Vec<String>,
    /// Delete memories carrying these tags.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Delete all memories in this session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Delete memories created before this Unix timestamp.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub before_timestamp: Option<u64>,
}
522
523impl ForgetRequest {
524    /// Forget specific memories by ID
525    pub fn by_ids(agent_id: impl Into<String>, ids: Vec<String>) -> Self {
526        Self {
527            agent_id: agent_id.into(),
528            memory_ids: ids,
529            tags: Vec::new(),
530            session_id: None,
531            before_timestamp: None,
532        }
533    }
534
535    /// Forget memories with specific tags
536    pub fn by_tags(agent_id: impl Into<String>, tags: Vec<String>) -> Self {
537        Self {
538            agent_id: agent_id.into(),
539            memory_ids: Vec::new(),
540            tags,
541            session_id: None,
542            before_timestamp: None,
543        }
544    }
545
546    /// Forget all memories in a session
547    pub fn by_session(agent_id: impl Into<String>, session_id: impl Into<String>) -> Self {
548        Self {
549            agent_id: agent_id.into(),
550            memory_ids: Vec::new(),
551            tags: Vec::new(),
552            session_id: Some(session_id.into()),
553            before_timestamp: None,
554        }
555    }
556}
557
/// Forget response
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetResponse {
    /// Number of memories deleted by the request.
    pub deleted_count: u64,
}

/// Session start request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartRequest {
    /// Agent the session belongs to.
    pub agent_id: String,
    /// Arbitrary JSON metadata attached to the session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}

/// Session information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    /// Session ID.
    pub id: String,
    /// Owning agent ID.
    pub agent_id: String,
    // Timestamps are u64 Unix values; presumably seconds, consistent with the
    // other timestamps in this file — confirm against the server API.
    /// When the session started (Unix timestamp).
    pub started_at: u64,
    /// When the session ended (Unix timestamp); `None` while still active.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ended_at: Option<u64>,
    /// Optional summary recorded when the session was ended.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
    /// Arbitrary JSON metadata attached to the session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Cached count of memories in this session
    #[serde(default)]
    pub memory_count: usize,
}

/// Session end request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndRequest {
    /// Optional summary to store on the ended session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
}

/// Response from `POST /v1/sessions/start`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartResponse {
    /// The newly created session.
    pub session: Session,
}

/// Response from `POST /v1/sessions/{id}/end`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndResponse {
    /// The session, now with `ended_at` (and possibly `summary`) populated.
    pub session: Session,
    /// Number of memories recorded in the session.
    pub memory_count: usize,
}
608
/// Request to update a memory
///
/// All fields are optional; only the set fields are changed (PATCH semantics
/// implied by the `skip_serializing_if` attributes — confirm against server).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateMemoryRequest {
    /// New content, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// New metadata, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// New memory type, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
}

/// Request to update memory importance
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateImportanceRequest {
    /// Memories to update.
    pub memory_ids: Vec<String>,
    /// New importance value applied to all listed memories.
    pub importance: f32,
}

/// DBSCAN algorithm config for adaptive consolidation (CE-6).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidationConfig {
    /// Clustering algorithm: `"dbscan"` (default) or `"greedy"`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub algorithm: Option<String>,
    /// Minimum cluster samples for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_samples: Option<u32>,
    /// Epsilon distance parameter for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eps: Option<f32>,
}

/// One step in the consolidation execution log (CE-6).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsolidationLogEntry {
    /// Name of the consolidation step.
    pub step: String,
    /// Memory count before this step ran.
    pub memories_before: usize,
    /// Memory count after this step ran.
    pub memories_after: usize,
    /// Wall-clock duration of the step in milliseconds.
    pub duration_ms: f64,
}

/// Request to consolidate memories
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidateRequest {
    /// Restrict consolidation to one memory type (string form).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<String>,
    /// Similarity threshold for clustering.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub threshold: Option<f32>,
    /// When true, report what would be consolidated without changing anything.
    #[serde(default)]
    pub dry_run: bool,
    /// Optional DBSCAN algorithm configuration (CE-6).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<ConsolidationConfig>,
}
663
/// Response from consolidation (`POST /v1/memory/consolidate`).
///
/// The server returns `{"memories_removed": N, "source_memory_ids": [...], "consolidated_memory": {...}}`.
/// `consolidated_count` is mapped from `memories_removed` for backward compat.
///
/// `Deserialize` is implemented by hand (below).
#[derive(Debug, Clone, Serialize)]
pub struct ConsolidateResponse {
    /// Number of source memories removed (= `memories_removed` from server)
    pub consolidated_count: usize,
    /// Alias for consolidated_count
    pub removed_count: usize,
    /// IDs of source memories that were removed
    // NOTE(review): despite the name, the deserializer fills this with the
    // server's `source_memory_ids` (removed sources, not newly created
    // memories). Name kept for backward compatibility — confirm with callers.
    #[serde(default)]
    pub new_memories: Vec<String>,
    /// Step-by-step consolidation log (CE-6, optional).
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub log: Vec<ConsolidationLogEntry>,
}
681
682impl<'de> serde::Deserialize<'de> for ConsolidateResponse {
683    fn deserialize<D: serde::Deserializer<'de>>(
684        deserializer: D,
685    ) -> std::result::Result<Self, D::Error> {
686        let val = serde_json::Value::deserialize(deserializer)?;
687        // Server format: {"consolidated_memory":{...}, "source_memory_ids":[...], "memories_removed": N}
688        let removed = val
689            .get("memories_removed")
690            .and_then(|v| v.as_u64())
691            .or_else(|| val.get("removed_count").and_then(|v| v.as_u64()))
692            .or_else(|| val.get("consolidated_count").and_then(|v| v.as_u64()))
693            .unwrap_or(0) as usize;
694        let source_ids: Vec<String> = val
695            .get("source_memory_ids")
696            .and_then(|v| v.as_array())
697            .map(|arr| {
698                arr.iter()
699                    .filter_map(|v| v.as_str().map(String::from))
700                    .collect()
701            })
702            .unwrap_or_default();
703        Ok(Self {
704            consolidated_count: removed,
705            removed_count: removed,
706            new_memories: source_ids,
707            log: vec![],
708        })
709    }
710}
711
// ============================================================================
// DX-1: Memory Import / Export
// ============================================================================

/// Response from `POST /v1/import` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryImportResponse {
    /// Number of memories successfully imported.
    pub imported_count: usize,
    /// Number of entries skipped.
    pub skipped_count: usize,
    /// Per-entry error messages, if any.
    #[serde(default)]
    pub errors: Vec<String>,
}

/// Response from `GET /v1/export` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryExportResponse {
    /// Exported memories as raw JSON values.
    pub data: Vec<serde_json::Value>,
    /// Export format identifier.
    pub format: String,
    /// Number of exported entries.
    pub count: usize,
}

// ============================================================================
// OBS-1: Business-Event Audit Log
// ============================================================================

/// A single business-event entry from the audit log (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditEvent {
    /// Event ID.
    pub id: String,
    /// Event type identifier (string-typed, server-defined).
    pub event_type: String,
    /// Agent the event relates to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    /// Namespace the event relates to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
    /// Event time as a Unix timestamp.
    pub timestamp: u64,
    /// Free-form event details (JSON `null` when absent).
    #[serde(default)]
    pub details: serde_json::Value,
}

/// Response from `GET /v1/audit` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditListResponse {
    /// Events in this page.
    pub events: Vec<AuditEvent>,
    /// Total number of matching events.
    pub total: usize,
    /// Pagination cursor for the next page, if more events exist.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}

/// Response from `POST /v1/audit/export` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditExportResponse {
    /// Exported audit data, serialized in `format`.
    pub data: String,
    /// Export format identifier.
    pub format: String,
    /// Number of exported events.
    pub count: usize,
}

/// Query parameters for the audit log (OBS-1).
///
/// All fields are optional; unset fields are omitted from the query.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct AuditQuery {
    /// Restrict to events for this agent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    /// Restrict to one event type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub event_type: Option<String>,
    /// Only events at or after this Unix timestamp.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub from: Option<u64>,
    /// Only events at or before this Unix timestamp.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<u64>,
    /// Maximum number of events to return.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<u32>,
    /// Pagination cursor from a previous response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}
784
// ============================================================================
// EXT-1: External Extraction Providers
// ============================================================================

/// Result from `POST /v1/extract` (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionResult {
    /// Extracted entities as raw JSON values (provider-specific schema).
    pub entities: Vec<serde_json::Value>,
    /// Provider that performed the extraction.
    pub provider: String,
    /// Model used by the provider, if reported.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Extraction latency in milliseconds.
    pub duration_ms: f64,
}

/// Metadata for an available extraction provider (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionProviderInfo {
    /// Provider name.
    pub name: String,
    /// Whether the provider is currently usable.
    pub available: bool,
    /// Models the provider supports.
    #[serde(default)]
    pub models: Vec<String>,
}

/// Response from `GET /v1/extract/providers` (EXT-1).
///
/// `untagged`: accepts either a bare JSON array of providers or an object
/// with a `providers` field, depending on server version.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ExtractProvidersResponse {
    /// Bare array form: `[{...}, ...]`.
    List(Vec<ExtractionProviderInfo>),
    /// Object form: `{"providers": [{...}, ...]}`.
    Object {
        providers: Vec<ExtractionProviderInfo>,
    },
}
817
// ============================================================================
// SEC-3: AES-256-GCM Encryption Key Rotation
// ============================================================================

/// Request body for `POST /v1/admin/encryption/rotate-key` (SEC-3).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyRequest {
    /// New passphrase or 64-char hex key to rotate to.
    pub new_key: String,
    /// If set, rotate only memories in this namespace. Omit to rotate all.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
}

/// Response from `POST /v1/admin/encryption/rotate-key` (SEC-3).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyResponse {
    /// Number of memories re-encrypted with the new key.
    pub rotated: usize,
    /// Number of memories skipped.
    pub skipped: usize,
    /// Namespaces the rotation touched.
    #[serde(default)]
    pub namespaces: Vec<String>,
}

/// Request for memory feedback
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeedbackRequest {
    /// Memory the feedback applies to.
    pub memory_id: String,
    /// Feedback value (string-typed, server-defined vocabulary).
    pub feedback: String,
    /// Optional relevance score accompanying the feedback.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub relevance_score: Option<f32>,
}

/// Response from legacy feedback endpoint (POST /v1/agents/:id/memories/feedback)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LegacyFeedbackResponse {
    /// Outcome status string.
    pub status: String,
    /// Importance after the feedback was applied, if it changed.
    pub updated_importance: Option<f32>,
}
856
// ============================================================================
// CE-2: Batch Recall / Forget Types
// ============================================================================

/// Filter predicates for batch memory operations (CE-2).
///
/// All fields are optional.  For [`BatchForgetRequest`] at least one must be
/// set (server-side safety guard).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BatchMemoryFilter {
    /// Restrict to memories that carry **all** listed tags.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<String>>,
    /// Minimum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_importance: Option<f32>,
    /// Maximum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_importance: Option<f32>,
    /// Only memories created at or after this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_after: Option<u64>,
    /// Only memories created before or at this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_before: Option<u64>,
    /// Restrict to a specific memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Restrict to memories from a specific session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
}
889
890impl BatchMemoryFilter {
891    /// Convenience: filter by tags.
892    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
893        self.tags = Some(tags);
894        self
895    }
896
897    /// Convenience: filter by minimum importance.
898    pub fn with_min_importance(mut self, min: f32) -> Self {
899        self.min_importance = Some(min);
900        self
901    }
902
903    /// Convenience: filter by maximum importance.
904    pub fn with_max_importance(mut self, max: f32) -> Self {
905        self.max_importance = Some(max);
906        self
907    }
908
909    /// Convenience: filter by session.
910    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
911        self.session_id = Some(session_id.into());
912        self
913    }
914}
915
916/// Request body for `POST /v1/memories/recall/batch`.
917#[derive(Debug, Clone, Serialize, Deserialize)]
918pub struct BatchRecallRequest {
919    /// Agent whose memory namespace to search.
920    pub agent_id: String,
921    /// Filter predicates to apply.
922    #[serde(default)]
923    pub filter: BatchMemoryFilter,
924    /// Maximum number of results to return (default: 100).
925    #[serde(default = "default_batch_limit")]
926    pub limit: usize,
927}
928
929fn default_batch_limit() -> usize {
930    100
931}
932
933impl BatchRecallRequest {
934    /// Create a new batch recall request for an agent.
935    pub fn new(agent_id: impl Into<String>) -> Self {
936        Self {
937            agent_id: agent_id.into(),
938            filter: BatchMemoryFilter::default(),
939            limit: 100,
940        }
941    }
942
943    /// Set filter predicates.
944    pub fn with_filter(mut self, filter: BatchMemoryFilter) -> Self {
945        self.filter = filter;
946        self
947    }
948
949    /// Set result limit.
950    pub fn with_limit(mut self, limit: usize) -> Self {
951        self.limit = limit;
952        self
953    }
954}
955
/// Response from `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallResponse {
    /// Memories that matched the filter (up to the requested limit).
    pub memories: Vec<RecalledMemory>,
    /// Total memories in the agent namespace.
    pub total: usize,
    /// Number of memories that passed the filter.
    pub filtered: usize,
}
965
/// Request body for `DELETE /v1/memories/forget/batch`.
///
/// Built via [`BatchForgetRequest::new`]; submitted with
/// [`DakeraClient::batch_forget`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetRequest {
    /// Agent whose memory namespace to purge from.
    pub agent_id: String,
    /// Filter predicates — **at least one must be set** (server safety guard).
    pub filter: BatchMemoryFilter,
}
974
975impl BatchForgetRequest {
976    /// Create a new batch forget request with the given filter.
977    pub fn new(agent_id: impl Into<String>, filter: BatchMemoryFilter) -> Self {
978        Self {
979            agent_id: agent_id.into(),
980            filter,
981        }
982    }
983}
984
/// Response from `DELETE /v1/memories/forget/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetResponse {
    /// Number of memories deleted by the batch operation.
    pub deleted_count: usize,
}
990
991// ============================================================================
992// Memory Client Methods
993// ============================================================================
994
995impl DakeraClient {
996    // ========================================================================
997    // Memory Operations
998    // ========================================================================
999
1000    /// Store a memory for an agent
1001    ///
1002    /// # Example
1003    ///
1004    /// ```rust,no_run
1005    /// use dakera_client::{DakeraClient, memory::StoreMemoryRequest};
1006    ///
1007    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1008    /// let client = DakeraClient::new("http://localhost:3000")?;
1009    ///
1010    /// let request = StoreMemoryRequest::new("agent-1", "The user prefers dark mode")
1011    ///     .with_importance(0.8)
1012    ///     .with_tags(vec!["preferences".to_string()]);
1013    ///
1014    /// let response = client.store_memory(request).await?;
1015    /// println!("Stored memory: {}", response.memory_id);
1016    /// # Ok(())
1017    /// # }
1018    /// ```
1019    pub async fn store_memory(&self, request: StoreMemoryRequest) -> Result<StoreMemoryResponse> {
1020        let url = format!("{}/v1/memory/store", self.base_url);
1021        let response = self.client.post(&url).json(&request).send().await?;
1022        self.handle_response(response).await
1023    }
1024
1025    /// Recall memories by semantic query
1026    ///
1027    /// # Example
1028    ///
1029    /// ```rust,no_run
1030    /// use dakera_client::{DakeraClient, memory::RecallRequest};
1031    ///
1032    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1033    /// let client = DakeraClient::new("http://localhost:3000")?;
1034    ///
1035    /// let request = RecallRequest::new("agent-1", "user preferences")
1036    ///     .with_top_k(10);
1037    ///
1038    /// let response = client.recall(request).await?;
1039    /// for memory in response.memories {
1040    ///     println!("{}: {} (score: {})", memory.id, memory.content, memory.score);
1041    /// }
1042    /// # Ok(())
1043    /// # }
1044    /// ```
1045    pub async fn recall(&self, request: RecallRequest) -> Result<RecallResponse> {
1046        let url = format!("{}/v1/memory/recall", self.base_url);
1047        let response = self.client.post(&url).json(&request).send().await?;
1048        self.handle_response(response).await
1049    }
1050
1051    /// Simple recall with just agent_id and query (convenience method)
1052    pub async fn recall_simple(
1053        &self,
1054        agent_id: &str,
1055        query: &str,
1056        top_k: usize,
1057    ) -> Result<RecallResponse> {
1058        self.recall(RecallRequest::new(agent_id, query).with_top_k(top_k))
1059            .await
1060    }
1061
1062    /// Get a specific memory by ID
1063    pub async fn get_memory(&self, memory_id: &str) -> Result<RecalledMemory> {
1064        let url = format!("{}/v1/memory/get/{}", self.base_url, memory_id);
1065        let response = self.client.get(&url).send().await?;
1066        self.handle_response(response).await
1067    }
1068
1069    /// Forget (delete) memories
1070    pub async fn forget(&self, request: ForgetRequest) -> Result<ForgetResponse> {
1071        let url = format!("{}/v1/memory/forget", self.base_url);
1072        let response = self.client.post(&url).json(&request).send().await?;
1073        self.handle_response(response).await
1074    }
1075
1076    /// Search memories with advanced filters
1077    pub async fn search_memories(&self, request: RecallRequest) -> Result<RecallResponse> {
1078        let url = format!("{}/v1/memory/search", self.base_url);
1079        let response = self.client.post(&url).json(&request).send().await?;
1080        self.handle_response(response).await
1081    }
1082
1083    /// Update an existing memory
1084    pub async fn update_memory(
1085        &self,
1086        agent_id: &str,
1087        memory_id: &str,
1088        request: UpdateMemoryRequest,
1089    ) -> Result<StoreMemoryResponse> {
1090        let url = format!(
1091            "{}/v1/agents/{}/memories/{}",
1092            self.base_url, agent_id, memory_id
1093        );
1094        let response = self.client.put(&url).json(&request).send().await?;
1095        self.handle_response(response).await
1096    }
1097
1098    /// Update importance of memories
1099    pub async fn update_importance(
1100        &self,
1101        agent_id: &str,
1102        request: UpdateImportanceRequest,
1103    ) -> Result<serde_json::Value> {
1104        let url = format!(
1105            "{}/v1/agents/{}/memories/importance",
1106            self.base_url, agent_id
1107        );
1108        let response = self.client.put(&url).json(&request).send().await?;
1109        self.handle_response(response).await
1110    }
1111
1112    /// Consolidate memories for an agent
1113    pub async fn consolidate(
1114        &self,
1115        agent_id: &str,
1116        request: ConsolidateRequest,
1117    ) -> Result<ConsolidateResponse> {
1118        // Server endpoint: POST /v1/memory/consolidate with agent_id in body
1119        let url = format!("{}/v1/memory/consolidate", self.base_url);
1120        let mut body = serde_json::to_value(&request)?;
1121        body["agent_id"] = serde_json::Value::String(agent_id.to_string());
1122        let response = self.client.post(&url).json(&body).send().await?;
1123        self.handle_response(response).await
1124    }
1125
1126    /// Submit feedback on a memory recall
1127    pub async fn memory_feedback(
1128        &self,
1129        agent_id: &str,
1130        request: FeedbackRequest,
1131    ) -> Result<LegacyFeedbackResponse> {
1132        let url = format!("{}/v1/agents/{}/memories/feedback", self.base_url, agent_id);
1133        let response = self.client.post(&url).json(&request).send().await?;
1134        self.handle_response(response).await
1135    }
1136
1137    // ========================================================================
1138    // Memory Feedback Loop — INT-1
1139    // ========================================================================
1140
1141    /// Submit upvote/downvote/flag feedback on a memory (INT-1).
1142    ///
1143    /// # Arguments
1144    /// * `memory_id` – The memory to give feedback on.
1145    /// * `agent_id` – The agent that owns the memory.
1146    /// * `signal` – [`FeedbackSignal`] value: `Upvote`, `Downvote`, or `Flag`.
1147    ///
1148    /// # Example
1149    /// ```no_run
1150    /// # use dakera_client::{DakeraClient, FeedbackSignal};
1151    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1152    /// let resp = client.feedback_memory("mem-abc", "agent-1", FeedbackSignal::Upvote).await?;
1153    /// println!("new importance: {}", resp.new_importance);
1154    /// # Ok(()) }
1155    /// ```
1156    pub async fn feedback_memory(
1157        &self,
1158        memory_id: &str,
1159        agent_id: &str,
1160        signal: FeedbackSignal,
1161    ) -> Result<FeedbackResponse> {
1162        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1163        let body = MemoryFeedbackBody {
1164            agent_id: agent_id.to_string(),
1165            signal,
1166        };
1167        let response = self.client.post(&url).json(&body).send().await?;
1168        self.handle_response(response).await
1169    }
1170
1171    /// Get the full feedback history for a memory (INT-1).
1172    pub async fn get_memory_feedback_history(
1173        &self,
1174        memory_id: &str,
1175    ) -> Result<FeedbackHistoryResponse> {
1176        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1177        let response = self.client.get(&url).send().await?;
1178        self.handle_response(response).await
1179    }
1180
1181    /// Get aggregate feedback counts and health score for an agent (INT-1).
1182    pub async fn get_agent_feedback_summary(&self, agent_id: &str) -> Result<AgentFeedbackSummary> {
1183        let url = format!("{}/v1/agents/{}/feedback/summary", self.base_url, agent_id);
1184        let response = self.client.get(&url).send().await?;
1185        self.handle_response(response).await
1186    }
1187
1188    /// Directly override a memory's importance score (INT-1).
1189    ///
1190    /// # Arguments
1191    /// * `memory_id` – The memory to update.
1192    /// * `agent_id` – The agent that owns the memory.
1193    /// * `importance` – New importance value (0.0–1.0).
1194    pub async fn patch_memory_importance(
1195        &self,
1196        memory_id: &str,
1197        agent_id: &str,
1198        importance: f32,
1199    ) -> Result<FeedbackResponse> {
1200        let url = format!("{}/v1/memories/{}/importance", self.base_url, memory_id);
1201        let body = MemoryImportancePatch {
1202            agent_id: agent_id.to_string(),
1203            importance,
1204        };
1205        let response = self.client.patch(&url).json(&body).send().await?;
1206        self.handle_response(response).await
1207    }
1208
1209    /// Get overall feedback health score for an agent (INT-1).
1210    ///
1211    /// The health score is the mean importance of all non-expired memories (0.0–1.0).
1212    /// A higher score indicates a healthier, more relevant memory store.
1213    pub async fn get_feedback_health(&self, agent_id: &str) -> Result<FeedbackHealthResponse> {
1214        let url = format!("{}/v1/feedback/health?agent_id={}", self.base_url, agent_id);
1215        let response = self.client.get(&url).send().await?;
1216        self.handle_response(response).await
1217    }
1218
1219    // ========================================================================
1220    // Memory Knowledge Graph Operations (CE-5 / SDK-9)
1221    // ========================================================================
1222
1223    /// Traverse the knowledge graph from a memory node.
1224    ///
1225    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1226    ///
1227    /// # Arguments
1228    /// * `memory_id` – Root memory ID to start traversal from.
1229    /// * `options` – Traversal options (depth, edge type filters).
1230    ///
1231    /// # Example
1232    /// ```no_run
1233    /// # use dakera_client::{DakeraClient, GraphOptions};
1234    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1235    /// let graph = client.memory_graph("mem-abc", GraphOptions::new().depth(2)).await?;
1236    /// println!("{} nodes, {} edges", graph.nodes.len(), graph.edges.len());
1237    /// # Ok(()) }
1238    /// ```
1239    pub async fn memory_graph(
1240        &self,
1241        memory_id: &str,
1242        options: GraphOptions,
1243    ) -> Result<MemoryGraph> {
1244        let mut url = format!("{}/v1/memories/{}/graph", self.base_url, memory_id);
1245        let depth = options.depth.unwrap_or(1);
1246        url.push_str(&format!("?depth={}", depth));
1247        if let Some(types) = &options.types {
1248            let type_strs: Vec<String> = types
1249                .iter()
1250                .map(|t| {
1251                    serde_json::to_value(t)
1252                        .unwrap()
1253                        .as_str()
1254                        .unwrap_or("")
1255                        .to_string()
1256                })
1257                .collect();
1258            if !type_strs.is_empty() {
1259                url.push_str(&format!("&types={}", type_strs.join(",")));
1260            }
1261        }
1262        let response = self.client.get(&url).send().await?;
1263        self.handle_response(response).await
1264    }
1265
1266    /// Find the shortest path between two memories in the knowledge graph.
1267    ///
1268    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1269    ///
1270    /// # Example
1271    /// ```no_run
1272    /// # use dakera_client::DakeraClient;
1273    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1274    /// let path = client.memory_path("mem-abc", "mem-xyz").await?;
1275    /// println!("{} hops: {:?}", path.hops, path.path);
1276    /// # Ok(()) }
1277    /// ```
1278    pub async fn memory_path(&self, source_id: &str, target_id: &str) -> Result<GraphPath> {
1279        let url = format!(
1280            "{}/v1/memories/{}/path?target={}",
1281            self.base_url,
1282            source_id,
1283            urlencoding::encode(target_id)
1284        );
1285        let response = self.client.get(&url).send().await?;
1286        self.handle_response(response).await
1287    }
1288
1289    /// Create an explicit edge between two memories.
1290    ///
1291    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1292    ///
1293    /// # Example
1294    /// ```no_run
1295    /// # use dakera_client::{DakeraClient, EdgeType};
1296    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1297    /// let resp = client.memory_link("mem-abc", "mem-xyz", EdgeType::LinkedBy).await?;
1298    /// println!("Created edge: {}", resp.edge.id);
1299    /// # Ok(()) }
1300    /// ```
1301    pub async fn memory_link(
1302        &self,
1303        source_id: &str,
1304        target_id: &str,
1305        edge_type: EdgeType,
1306    ) -> Result<GraphLinkResponse> {
1307        let url = format!("{}/v1/memories/{}/links", self.base_url, source_id);
1308        let request = GraphLinkRequest {
1309            target_id: target_id.to_string(),
1310            edge_type,
1311        };
1312        let response = self.client.post(&url).json(&request).send().await?;
1313        self.handle_response(response).await
1314    }
1315
1316    /// Export the full knowledge graph for an agent.
1317    ///
1318    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1319    ///
1320    /// # Arguments
1321    /// * `agent_id` – Agent whose graph to export.
1322    /// * `format` – Export format: `"json"` (default), `"graphml"`, or `"csv"`.
1323    pub async fn agent_graph_export(&self, agent_id: &str, format: &str) -> Result<GraphExport> {
1324        let url = format!(
1325            "{}/v1/agents/{}/graph/export?format={}",
1326            self.base_url, agent_id, format
1327        );
1328        let response = self.client.get(&url).send().await?;
1329        self.handle_response(response).await
1330    }
1331
1332    // ========================================================================
1333    // Session Operations
1334    // ========================================================================
1335
1336    /// Start a new session for an agent
1337    pub async fn start_session(&self, agent_id: &str) -> Result<Session> {
1338        let url = format!("{}/v1/sessions/start", self.base_url);
1339        let request = SessionStartRequest {
1340            agent_id: agent_id.to_string(),
1341            metadata: None,
1342        };
1343        let response = self.client.post(&url).json(&request).send().await?;
1344        let resp: SessionStartResponse = self.handle_response(response).await?;
1345        Ok(resp.session)
1346    }
1347
1348    /// Start a session with metadata
1349    pub async fn start_session_with_metadata(
1350        &self,
1351        agent_id: &str,
1352        metadata: serde_json::Value,
1353    ) -> Result<Session> {
1354        let url = format!("{}/v1/sessions/start", self.base_url);
1355        let request = SessionStartRequest {
1356            agent_id: agent_id.to_string(),
1357            metadata: Some(metadata),
1358        };
1359        let response = self.client.post(&url).json(&request).send().await?;
1360        let resp: SessionStartResponse = self.handle_response(response).await?;
1361        Ok(resp.session)
1362    }
1363
1364    /// End a session, optionally with a summary.
1365    /// Returns the session state and the total memory count at close.
1366    pub async fn end_session(
1367        &self,
1368        session_id: &str,
1369        summary: Option<String>,
1370    ) -> Result<SessionEndResponse> {
1371        let url = format!("{}/v1/sessions/{}/end", self.base_url, session_id);
1372        let request = SessionEndRequest { summary };
1373        let response = self.client.post(&url).json(&request).send().await?;
1374        self.handle_response(response).await
1375    }
1376
1377    /// Get a session by ID
1378    pub async fn get_session(&self, session_id: &str) -> Result<Session> {
1379        let url = format!("{}/v1/sessions/{}", self.base_url, session_id);
1380        let response = self.client.get(&url).send().await?;
1381        self.handle_response(response).await
1382    }
1383
1384    /// List sessions for an agent
1385    pub async fn list_sessions(&self, agent_id: &str) -> Result<Vec<Session>> {
1386        let url = format!("{}/v1/sessions?agent_id={}", self.base_url, agent_id);
1387        let response = self.client.get(&url).send().await?;
1388        self.handle_response(response).await
1389    }
1390
1391    /// Get memories in a session
1392    pub async fn session_memories(&self, session_id: &str) -> Result<RecallResponse> {
1393        let url = format!("{}/v1/sessions/{}/memories", self.base_url, session_id);
1394        let response = self.client.get(&url).send().await?;
1395        self.handle_response(response).await
1396    }
1397
1398    // ========================================================================
1399    // CE-2: Batch Recall / Forget
1400    // ========================================================================
1401
1402    /// Bulk-recall memories using filter predicates (CE-2).
1403    ///
1404    /// Uses `POST /v1/memories/recall/batch` — no embedding required.
1405    ///
1406    /// # Example
1407    ///
1408    /// ```rust,no_run
1409    /// use dakera_client::{DakeraClient, memory::{BatchRecallRequest, BatchMemoryFilter}};
1410    ///
1411    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1412    /// let client = DakeraClient::new("http://localhost:3000")?;
1413    ///
1414    /// let filter = BatchMemoryFilter::default().with_min_importance(0.7);
1415    /// let req = BatchRecallRequest::new("agent-1").with_filter(filter).with_limit(50);
1416    /// let resp = client.batch_recall(req).await?;
1417    /// println!("Found {} memories", resp.filtered);
1418    /// # Ok(())
1419    /// # }
1420    /// ```
1421    pub async fn batch_recall(&self, request: BatchRecallRequest) -> Result<BatchRecallResponse> {
1422        let url = format!("{}/v1/memories/recall/batch", self.base_url);
1423        let response = self.client.post(&url).json(&request).send().await?;
1424        self.handle_response(response).await
1425    }
1426
1427    /// Bulk-delete memories using filter predicates (CE-2).
1428    ///
1429    /// Uses `DELETE /v1/memories/forget/batch`.  The server requires at least
1430    /// one filter predicate to be set as a safety guard.
1431    ///
1432    /// # Example
1433    ///
1434    /// ```rust,no_run
1435    /// use dakera_client::{DakeraClient, memory::{BatchForgetRequest, BatchMemoryFilter}};
1436    ///
1437    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1438    /// let client = DakeraClient::new("http://localhost:3000")?;
1439    ///
1440    /// let filter = BatchMemoryFilter::default().with_min_importance(0.0).with_max_importance(0.2);
1441    /// let resp = client.batch_forget(BatchForgetRequest::new("agent-1", filter)).await?;
1442    /// println!("Deleted {} memories", resp.deleted_count);
1443    /// # Ok(())
1444    /// # }
1445    /// ```
1446    pub async fn batch_forget(&self, request: BatchForgetRequest) -> Result<BatchForgetResponse> {
1447        let url = format!("{}/v1/memories/forget/batch", self.base_url);
1448        let response = self.client.delete(&url).json(&request).send().await?;
1449        self.handle_response(response).await
1450    }
1451
1452    // ========================================================================
1453    // DX-1: Memory Import / Export
1454    // ========================================================================
1455
1456    /// Import memories from an external format (DX-1).
1457    ///
1458    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1459    ///
1460    /// ```no_run
1461    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1462    /// let client = dakera_client::DakeraClient::new("http://localhost:3000")?;
1463    /// let data = serde_json::json!([{"content": "hello", "agent_id": "agent-1"}]);
1464    /// let resp = client.import_memories(data, "jsonl", None, None).await?;
1465    /// println!("Imported {} memories", resp.imported_count);
1466    /// # Ok(())
1467    /// # }
1468    /// ```
1469    pub async fn import_memories(
1470        &self,
1471        data: serde_json::Value,
1472        format: &str,
1473        agent_id: Option<&str>,
1474        namespace: Option<&str>,
1475    ) -> Result<MemoryImportResponse> {
1476        let mut body = serde_json::json!({"data": data, "format": format});
1477        if let Some(aid) = agent_id {
1478            body["agent_id"] = serde_json::Value::String(aid.to_string());
1479        }
1480        if let Some(ns) = namespace {
1481            body["namespace"] = serde_json::Value::String(ns.to_string());
1482        }
1483        let url = format!("{}/v1/import", self.base_url);
1484        let response = self.client.post(&url).json(&body).send().await?;
1485        self.handle_response(response).await
1486    }
1487
1488    /// Export memories in a portable format (DX-1).
1489    ///
1490    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1491    pub async fn export_memories(
1492        &self,
1493        format: &str,
1494        agent_id: Option<&str>,
1495        namespace: Option<&str>,
1496        limit: Option<u32>,
1497    ) -> Result<MemoryExportResponse> {
1498        let mut params = vec![("format", format.to_string())];
1499        if let Some(aid) = agent_id {
1500            params.push(("agent_id", aid.to_string()));
1501        }
1502        if let Some(ns) = namespace {
1503            params.push(("namespace", ns.to_string()));
1504        }
1505        if let Some(l) = limit {
1506            params.push(("limit", l.to_string()));
1507        }
1508        let url = format!("{}/v1/export", self.base_url);
1509        let response = self.client.get(&url).query(&params).send().await?;
1510        self.handle_response(response).await
1511    }
1512
1513    // ========================================================================
1514    // OBS-1: Business-Event Audit Log
1515    // ========================================================================
1516
1517    /// List paginated audit log entries (OBS-1).
1518    pub async fn list_audit_events(&self, query: AuditQuery) -> Result<AuditListResponse> {
1519        let url = format!("{}/v1/audit", self.base_url);
1520        let response = self.client.get(&url).query(&query).send().await?;
1521        self.handle_response(response).await
1522    }
1523
1524    /// Stream live audit events via SSE (OBS-1).
1525    ///
1526    /// Returns a [`tokio::sync::mpsc::Receiver`] that yields [`DakeraEvent`] results.
1527    pub async fn stream_audit_events(
1528        &self,
1529        agent_id: Option<&str>,
1530        event_type: Option<&str>,
1531    ) -> Result<tokio::sync::mpsc::Receiver<Result<crate::events::DakeraEvent>>> {
1532        let mut params: Vec<(&str, String)> = Vec::new();
1533        if let Some(aid) = agent_id {
1534            params.push(("agent_id", aid.to_string()));
1535        }
1536        if let Some(et) = event_type {
1537            params.push(("event_type", et.to_string()));
1538        }
1539        let base = format!("{}/v1/audit/stream", self.base_url);
1540        let url = if params.is_empty() {
1541            base
1542        } else {
1543            let qs = params
1544                .iter()
1545                .map(|(k, v)| format!("{}={}", k, urlencoding::encode(v)))
1546                .collect::<Vec<_>>()
1547                .join("&");
1548            format!("{}?{}", base, qs)
1549        };
1550        self.stream_sse(url).await
1551    }
1552
1553    /// Bulk-export audit log entries (OBS-1).
1554    pub async fn export_audit(
1555        &self,
1556        format: &str,
1557        agent_id: Option<&str>,
1558        event_type: Option<&str>,
1559        from_ts: Option<u64>,
1560        to_ts: Option<u64>,
1561    ) -> Result<AuditExportResponse> {
1562        let mut body = serde_json::json!({"format": format});
1563        if let Some(aid) = agent_id {
1564            body["agent_id"] = serde_json::Value::String(aid.to_string());
1565        }
1566        if let Some(et) = event_type {
1567            body["event_type"] = serde_json::Value::String(et.to_string());
1568        }
1569        if let Some(f) = from_ts {
1570            body["from"] = serde_json::Value::Number(f.into());
1571        }
1572        if let Some(t) = to_ts {
1573            body["to"] = serde_json::Value::Number(t.into());
1574        }
1575        let url = format!("{}/v1/audit/export", self.base_url);
1576        let response = self.client.post(&url).json(&body).send().await?;
1577        self.handle_response(response).await
1578    }
1579
1580    // ========================================================================
1581    // EXT-1: External Extraction Providers
1582    // ========================================================================
1583
1584    /// Extract entities from text using a pluggable provider (EXT-1).
1585    ///
1586    /// Provider hierarchy: per-request > namespace default > GLiNER (bundled).
1587    /// Supported providers: `"gliner"`, `"openai"`, `"anthropic"`, `"openrouter"`, `"ollama"`.
1588    pub async fn extract_text(
1589        &self,
1590        text: &str,
1591        namespace: Option<&str>,
1592        provider: Option<&str>,
1593        model: Option<&str>,
1594    ) -> Result<ExtractionResult> {
1595        let mut body = serde_json::json!({"text": text});
1596        if let Some(ns) = namespace {
1597            body["namespace"] = serde_json::Value::String(ns.to_string());
1598        }
1599        if let Some(p) = provider {
1600            body["provider"] = serde_json::Value::String(p.to_string());
1601        }
1602        if let Some(m) = model {
1603            body["model"] = serde_json::Value::String(m.to_string());
1604        }
1605        let url = format!("{}/v1/extract", self.base_url);
1606        let response = self.client.post(&url).json(&body).send().await?;
1607        self.handle_response(response).await
1608    }
1609
1610    /// List available extraction providers and their models (EXT-1).
1611    pub async fn list_extract_providers(&self) -> Result<Vec<ExtractionProviderInfo>> {
1612        let url = format!("{}/v1/extract/providers", self.base_url);
1613        let response = self.client.get(&url).send().await?;
1614        let result: ExtractProvidersResponse = self.handle_response(response).await?;
1615        Ok(match result {
1616            ExtractProvidersResponse::List(v) => v,
1617            ExtractProvidersResponse::Object { providers } => providers,
1618        })
1619    }
1620
1621    /// Set the default extraction provider for a namespace (EXT-1).
1622    pub async fn configure_namespace_extractor(
1623        &self,
1624        namespace: &str,
1625        provider: &str,
1626        model: Option<&str>,
1627    ) -> Result<serde_json::Value> {
1628        let mut body = serde_json::json!({"provider": provider});
1629        if let Some(m) = model {
1630            body["model"] = serde_json::Value::String(m.to_string());
1631        }
1632        let url = format!(
1633            "{}/v1/namespaces/{}/extractor",
1634            self.base_url,
1635            urlencoding::encode(namespace)
1636        );
1637        let response = self.client.patch(&url).json(&body).send().await?;
1638        self.handle_response(response).await
1639    }
1640
1641    // =========================================================================
1642    // SEC-3: AES-256-GCM Encryption Key Rotation
1643    // =========================================================================
1644
1645    /// Re-encrypt all memory content blobs with a new AES-256-GCM key (SEC-3).
1646    ///
1647    /// After this call the new key is active in the running process.
1648    /// The operator must update `DAKERA_ENCRYPTION_KEY` and restart to make
1649    /// the rotation durable across restarts.
1650    ///
1651    /// Requires Admin scope.
1652    ///
1653    /// # Arguments
1654    /// * `new_key` - New passphrase or 64-char hex key.
1655    /// * `namespace` - If `Some`, rotate only this namespace. `None` rotates all.
1656    pub async fn rotate_encryption_key(
1657        &self,
1658        new_key: &str,
1659        namespace: Option<&str>,
1660    ) -> Result<RotateEncryptionKeyResponse> {
1661        let body = RotateEncryptionKeyRequest {
1662            new_key: new_key.to_string(),
1663            namespace: namespace.map(|s| s.to_string()),
1664        };
1665        let url = format!("{}/v1/admin/encryption/rotate-key", self.base_url);
1666        let response = self.client.post(&url).json(&body).send().await?;
1667        self.handle_response(response).await
1668    }
1669}