// dakera_client/memory.rs
1//! Memory-oriented client methods for Dakera AI Agent Memory Platform
2//!
3//! Provides high-level methods for storing, recalling, and managing
4//! agent memories and sessions through the Dakera API.
5
6use serde::{Deserialize, Serialize};
7
8use crate::error::Result;
9use crate::types::{
10    AgentFeedbackSummary, EdgeType, FeedbackHealthResponse, FeedbackHistoryResponse,
11    FeedbackResponse, FeedbackSignal, GraphExport, GraphLinkRequest, GraphLinkResponse,
12    GraphOptions, GraphPath, MemoryFeedbackBody, MemoryGraph, MemoryImportancePatch,
13};
14use crate::DakeraClient;
15
16// ============================================================================
17// Memory Types (client-side)
18// ============================================================================
19
20/// Memory type classification
21#[derive(Debug, Clone, Serialize, Deserialize, Default)]
22#[serde(rename_all = "lowercase")]
23pub enum MemoryType {
24    #[default]
25    Episodic,
26    Semantic,
27    Procedural,
28    Working,
29}
30
/// Store a memory request.
///
/// Request body paired with [`StoreMemoryResponse`] (`POST /v1/memory/store`).
/// Optional fields are omitted from the serialized JSON so the server applies
/// its own defaults.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoreMemoryRequest {
    /// Owning agent.
    pub agent_id: String,
    /// Raw memory content to store.
    pub content: String,
    /// Classification; deserializes to `MemoryType::Episodic` when absent.
    #[serde(default)]
    pub memory_type: MemoryType,
    /// Importance score; deserializes to 0.5 when absent (`default_importance`).
    #[serde(default = "default_importance")]
    pub importance: f32,
    /// Free-form tags; empty when absent.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Optional session to associate the memory with.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Arbitrary caller-supplied JSON metadata.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Optional TTL in seconds. The memory is hard-deleted after this many
    /// seconds from creation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ttl_seconds: Option<u64>,
    /// Optional explicit expiry as a Unix timestamp (seconds). Takes precedence
    /// over `ttl_seconds` when both are set. The memory is hard-deleted by the
    /// decay engine on expiry (DECAY-3).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expires_at: Option<u64>,
}
56
/// Serde default for `StoreMemoryRequest::importance`: mid-scale 0.5.
fn default_importance() -> f32 {
    0.5_f32
}
60
61impl StoreMemoryRequest {
62    /// Create a new store memory request
63    pub fn new(agent_id: impl Into<String>, content: impl Into<String>) -> Self {
64        Self {
65            agent_id: agent_id.into(),
66            content: content.into(),
67            memory_type: MemoryType::default(),
68            importance: 0.5,
69            tags: Vec::new(),
70            session_id: None,
71            metadata: None,
72            ttl_seconds: None,
73            expires_at: None,
74        }
75    }
76
77    /// Set memory type
78    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
79        self.memory_type = memory_type;
80        self
81    }
82
83    /// Set importance score
84    pub fn with_importance(mut self, importance: f32) -> Self {
85        self.importance = importance.clamp(0.0, 1.0);
86        self
87    }
88
89    /// Set tags
90    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
91        self.tags = tags;
92        self
93    }
94
95    /// Set session ID
96    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
97        self.session_id = Some(session_id.into());
98        self
99    }
100
101    /// Set metadata
102    pub fn with_metadata(mut self, metadata: serde_json::Value) -> Self {
103        self.metadata = Some(metadata);
104        self
105    }
106
107    /// Set TTL in seconds. The memory is hard-deleted after this many seconds
108    /// from creation.
109    pub fn with_ttl(mut self, ttl_seconds: u64) -> Self {
110        self.ttl_seconds = Some(ttl_seconds);
111        self
112    }
113
114    /// Set an explicit expiry Unix timestamp (seconds). Takes precedence over
115    /// `ttl_seconds` when both are set (DECAY-3).
116    pub fn with_expires_at(mut self, expires_at: u64) -> Self {
117        self.expires_at = Some(expires_at);
118        self
119    }
120}
121
/// Stored memory response from `POST /v1/memory/store`.
///
/// The server wraps the memory in a nested `memory` object:
/// `{"memory": {"id": "...", "agent_id": "...", ...}, "embedding_time_ms": N}`.
/// The `memory_id` and `agent_id` fields are convenience accessors mapped from
/// `memory.id` and `memory.agent_id` respectively.
///
/// `Deserialize` is hand-written (below) to flatten the nested shape and to
/// accept a legacy flat format; `Serialize` is derived and always emits the
/// flat shape.
#[derive(Debug, Clone, Serialize)]
pub struct StoreMemoryResponse {
    /// Memory ID (mapped from `memory.id`)
    pub memory_id: String,
    /// Agent ID (mapped from `memory.agent_id`; empty string when absent)
    pub agent_id: String,
    /// Namespace (mapped from `memory.namespace`, defaults to `"default"`)
    pub namespace: String,
    /// Embedding latency in milliseconds (absent in the legacy format)
    pub embedding_time_ms: Option<u64>,
}
139
140impl<'de> serde::Deserialize<'de> for StoreMemoryResponse {
141    fn deserialize<D: serde::Deserializer<'de>>(
142        deserializer: D,
143    ) -> std::result::Result<Self, D::Error> {
144        use serde::de::Error;
145        let val = serde_json::Value::deserialize(deserializer)?;
146
147        // Server response: {"memory": {"id":"...","agent_id":"...",...}, "embedding_time_ms": N}
148        if let Some(memory) = val.get("memory") {
149            let memory_id = memory
150                .get("id")
151                .and_then(|v| v.as_str())
152                .ok_or_else(|| D::Error::missing_field("memory.id"))?
153                .to_string();
154            let agent_id = memory
155                .get("agent_id")
156                .and_then(|v| v.as_str())
157                .unwrap_or("")
158                .to_string();
159            let namespace = memory
160                .get("namespace")
161                .and_then(|v| v.as_str())
162                .unwrap_or("default")
163                .to_string();
164            let embedding_time_ms = val.get("embedding_time_ms").and_then(|v| v.as_u64());
165            return Ok(Self {
166                memory_id,
167                agent_id,
168                namespace,
169                embedding_time_ms,
170            });
171        }
172
173        // Legacy / mock format: {"memory_id":"...","agent_id":"...","namespace":"..."}
174        let memory_id = val
175            .get("memory_id")
176            .and_then(|v| v.as_str())
177            .ok_or_else(|| D::Error::missing_field("memory_id"))?
178            .to_string();
179        let agent_id = val
180            .get("agent_id")
181            .and_then(|v| v.as_str())
182            .unwrap_or("")
183            .to_string();
184        let namespace = val
185            .get("namespace")
186            .and_then(|v| v.as_str())
187            .unwrap_or("default")
188            .to_string();
189        Ok(Self {
190            memory_id,
191            agent_id,
192            namespace,
193            embedding_time_ms: None,
194        })
195    }
196}
197
/// Fusion strategy for hybrid recall (CE-14).
///
/// Controls how vector and BM25 scores are combined when `routing = Hybrid`.
/// `MinMax` is the server default since v0.11.2 (CEO architecture decision, DAK-1948).
/// `RecallRequest` sends `None` by default, so the server default applies automatically.
///
/// Wire names: `Rrf` serializes as `"rrf"` (via `rename_all = "snake_case"`)
/// and `MinMax` as `"minmax"` (explicit rename).
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum FusionStrategy {
    /// Reciprocal Rank Fusion (Cormack et al., SIGIR 2009).
    /// Formula: score(d) = Σ 1 / (k + rank(d)), k = 60.
    /// This variant is the Rust `Default` for ergonomic use; pass `None` in
    /// `RecallRequest` to let the server apply its own default (MinMax since v0.11.2).
    #[default]
    Rrf,
    /// Weighted min-max normalization — server default since v0.11.2.
    #[serde(rename = "minmax")]
    MinMax,
}
216
/// Retrieval routing mode for recall and search (CE-10).
///
/// Controls which retrieval index the server uses. `Auto` (default) lets the
/// server pick the best strategy based on the query.
/// Serialized in snake_case (`"auto"`, `"vector"`, `"bm25"`, `"hybrid"`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum RoutingMode {
    /// Server picks the best strategy (default).
    Auto,
    /// Force ANN vector search (HNSW).
    Vector,
    /// Force BM25 full-text search.
    Bm25,
    /// Fuse ANN and BM25 scores; how they are combined is controlled by
    /// [`FusionStrategy`].
    Hybrid,
}
233
/// Recall memories request.
///
/// Optional fields serialize only when set, so the server-side defaults apply
/// for anything left at `None`/empty. Build via [`RecallRequest::new`] plus
/// the `with_*` builders.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallRequest {
    pub agent_id: String,
    pub query: String,
    /// Number of results to return; deserializes to 5 when absent.
    #[serde(default = "default_top_k")]
    pub top_k: usize,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    #[serde(default)]
    pub min_importance: f32,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    #[serde(default)]
    pub tags: Vec<String>,
    /// COG-2: traverse KG depth-1 from recalled memories and include
    /// associatively linked memories in the response (default: false)
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub include_associated: bool,
    /// COG-2: max associated memories to return (default: 10, max: 10)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_cap: Option<u32>,
    /// KG-3: KG traversal depth 1–3 (default: 1); requires include_associated
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_depth: Option<u8>,
    /// KG-3: minimum edge weight for KG traversal (default: 0.0)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_min_weight: Option<f32>,
    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub since: Option<String>,
    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub until: Option<String>,
    /// CE-10: retrieval routing mode. `None` uses the server default (`auto`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub routing: Option<RoutingMode>,
    /// CE-13: cross-encoder reranking. `None` uses server default (`true` for recall,
    /// `false` for search). Set to `Some(false)` to disable on latency-sensitive paths.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rerank: Option<bool>,
    /// CE-14: fusion strategy when `routing = Hybrid`. `None` uses the server
    /// default (`MinMax` since v0.11.2 — see [`FusionStrategy`]).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fusion: Option<FusionStrategy>,
    /// CE-17: explicit vector/BM25 weight for Hybrid routing (0.0–1.0).
    /// When set, overrides the adaptive heuristic from `QueryClassifier`.
    /// Omit for adaptive defaults (recommended for most callers).
    /// Only effective when `routing = RoutingMode::Hybrid`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vector_weight: Option<f32>,
    /// CE-23: pseudo-relevance feedback (PRF) passes for BM25 routing (1–3, default: 1).
    /// Pass `Some(2)` or `Some(3)` for multi-hop or temporal queries where a second
    /// BM25 pass over extracted entities improves recall.
    /// Only effective when `routing = RoutingMode::Bm25`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub iterations: Option<u8>,
    /// v0.11.0: fetch session-adjacent memories within ±5 min of each top result.
    /// `None` uses server default (`true`). Set to `Some(false)` to disable for
    /// latency-sensitive paths.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub neighborhood: Option<bool>,
}
296
/// Serde default for `RecallRequest::top_k`: five results.
fn default_top_k() -> usize {
    5_usize
}
300
301impl RecallRequest {
302    /// Create a new recall request
303    pub fn new(agent_id: impl Into<String>, query: impl Into<String>) -> Self {
304        Self {
305            agent_id: agent_id.into(),
306            query: query.into(),
307            top_k: 5,
308            memory_type: None,
309            min_importance: 0.0,
310            session_id: None,
311            tags: Vec::new(),
312            include_associated: false,
313            associated_memories_cap: None,
314            associated_memories_depth: None,
315            associated_memories_min_weight: None,
316            since: None,
317            until: None,
318            routing: None,
319            rerank: None,
320            fusion: None,
321            vector_weight: None,
322            iterations: None,
323            neighborhood: None,
324        }
325    }
326
327    /// Set number of results
328    pub fn with_top_k(mut self, top_k: usize) -> Self {
329        self.top_k = top_k;
330        self
331    }
332
333    /// Filter by memory type
334    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
335        self.memory_type = Some(memory_type);
336        self
337    }
338
339    /// Set minimum importance threshold
340    pub fn with_min_importance(mut self, min: f32) -> Self {
341        self.min_importance = min;
342        self
343    }
344
345    /// Filter by session
346    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
347        self.session_id = Some(session_id.into());
348        self
349    }
350
351    /// Filter by tags
352    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
353        self.tags = tags;
354        self
355    }
356
357    /// COG-2: include KG depth-1 associated memories in the response
358    pub fn with_associated(mut self) -> Self {
359        self.include_associated = true;
360        self
361    }
362
363    /// COG-2: set max associated memories cap (default: 10, max: 10)
364    pub fn with_associated_cap(mut self, cap: u32) -> Self {
365        self.include_associated = true;
366        self.associated_memories_cap = Some(cap);
367        self
368    }
369
370    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
371    pub fn with_since(mut self, since: impl Into<String>) -> Self {
372        self.since = Some(since.into());
373        self
374    }
375
376    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
377    pub fn with_until(mut self, until: impl Into<String>) -> Self {
378        self.until = Some(until.into());
379        self
380    }
381
382    /// CE-10: set retrieval routing mode
383    pub fn with_routing(mut self, routing: RoutingMode) -> Self {
384        self.routing = Some(routing);
385        self
386    }
387
388    /// CE-13: enable or disable cross-encoder reranking (server default: true for recall)
389    pub fn with_rerank(mut self, rerank: bool) -> Self {
390        self.rerank = Some(rerank);
391        self
392    }
393
394    /// KG-3: set KG traversal depth (1–3, default: 1); implies include_associated
395    pub fn with_associated_depth(mut self, depth: u8) -> Self {
396        self.include_associated = true;
397        self.associated_memories_depth = Some(depth);
398        self
399    }
400
401    /// KG-3: set minimum edge weight for KG traversal (default: 0.0)
402    pub fn with_associated_min_weight(mut self, weight: f32) -> Self {
403        self.associated_memories_min_weight = Some(weight);
404        self
405    }
406
407    /// CE-14: set fusion strategy for hybrid recall (server default: `Rrf`)
408    pub fn with_fusion(mut self, fusion: FusionStrategy) -> Self {
409        self.fusion = Some(fusion);
410        self
411    }
412
413    /// CE-17: set explicit vector/BM25 weight for Hybrid routing (0.0–1.0).
414    /// Overrides the adaptive heuristic from `QueryClassifier`.
415    /// Omit for adaptive defaults (recommended for most callers).
416    pub fn with_vector_weight(mut self, weight: f32) -> Self {
417        self.vector_weight = Some(weight);
418        self
419    }
420
421    /// CE-23: set PRF iteration count for BM25 routing (1–3, default: 1).
422    /// Pass `2` or `3` for multi-hop or temporal queries where a second BM25
423    /// pass over extracted entities improves recall.
424    /// Only effective when `routing = RoutingMode::Bm25`.
425    pub fn with_iterations(mut self, iterations: u8) -> Self {
426        self.iterations = Some(iterations);
427        self
428    }
429
430    /// v0.11.0: enable or disable session-adjacent neighborhood enrichment
431    /// (server default: `true`). Set to `false` for latency-sensitive paths.
432    pub fn with_neighborhood(mut self, neighborhood: bool) -> Self {
433        self.neighborhood = Some(neighborhood);
434        self
435    }
436}
437
/// A recalled memory.
///
/// `Deserialize` is hand-written (below) to unwrap the server's nested
/// `{memory: {...}, score: ...}` shape; `Serialize` is derived. NOTE(review):
/// the `#[serde(default)]` on `tags` only affects derived deserialization,
/// which this type does not use — it is effectively inert here.
#[derive(Debug, Clone, Serialize)]
pub struct RecalledMemory {
    pub id: String,
    pub content: String,
    pub memory_type: MemoryType,
    pub importance: f32,
    /// Relevance score for this query (falls back to `weighted_score`, else 0.0).
    pub score: f32,
    #[serde(default)]
    pub tags: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Creation time as a Unix timestamp (0 when absent in the payload).
    pub created_at: u64,
    /// Last access time as a Unix timestamp (0 when absent in the payload).
    pub last_accessed_at: u64,
    pub access_count: u32,
    /// KG-3: hop depth at which this memory was found (only set on associated memories)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub depth: Option<u8>,
}
459
impl<'de> serde::Deserialize<'de> for RecalledMemory {
    /// Lenient deserialization: only `id` and `content` are mandatory; every
    /// other field falls back to a neutral default so both the wrapped recall
    /// shape and the flat memory-get shape parse.
    fn deserialize<D: serde::Deserializer<'de>>(
        deserializer: D,
    ) -> std::result::Result<Self, D::Error> {
        use serde::de::Error as _;
        let val = serde_json::Value::deserialize(deserializer)?;

        // Server wraps recall results as {memory:{...}, score, weighted_score, smart_score}.
        // Fall back to flat format for direct memory-get responses.
        // Score lives on the wrapper: prefer "score", then "weighted_score", else 0.0.
        let score = val
            .get("score")
            .and_then(|v| v.as_f64())
            .or_else(|| val.get("weighted_score").and_then(|v| v.as_f64()))
            .unwrap_or(0.0) as f32;

        // Everything else lives on the inner "memory" object when present,
        // or on the top-level value itself in the flat format.
        let mem = val.get("memory").unwrap_or(&val);

        // Mandatory fields — a payload without these is rejected.
        let id = mem
            .get("id")
            .and_then(|v| v.as_str())
            .ok_or_else(|| D::Error::missing_field("id"))?
            .to_string();
        let content = mem
            .get("content")
            .and_then(|v| v.as_str())
            .ok_or_else(|| D::Error::missing_field("content"))?
            .to_string();
        // Optional fields with defaults (malformed values also fall back).
        let memory_type: MemoryType = mem
            .get("memory_type")
            .and_then(|v| serde_json::from_value(v.clone()).ok())
            .unwrap_or(MemoryType::Episodic);
        let importance = mem
            .get("importance")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.5) as f32;
        let tags: Vec<String> = mem
            .get("tags")
            .and_then(|v| serde_json::from_value(v.clone()).ok())
            .unwrap_or_default();
        let session_id = mem
            .get("session_id")
            .and_then(|v| v.as_str())
            .map(String::from);
        // Explicit JSON null is treated the same as an absent metadata field.
        let metadata = mem.get("metadata").cloned().filter(|v| !v.is_null());
        let created_at = mem.get("created_at").and_then(|v| v.as_u64()).unwrap_or(0);
        let last_accessed_at = mem
            .get("last_accessed_at")
            .and_then(|v| v.as_u64())
            .unwrap_or(0);
        let access_count = mem
            .get("access_count")
            .and_then(|v| v.as_u64())
            .unwrap_or(0) as u32;
        let depth = mem.get("depth").and_then(|v| v.as_u64()).map(|v| v as u8);

        Ok(Self {
            id,
            content,
            memory_type,
            importance,
            score,
            tags,
            session_id,
            metadata,
            created_at,
            last_accessed_at,
            access_count,
            depth,
        })
    }
}
531
/// Recall response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallResponse {
    /// Primary recall hits, in server ranking order.
    pub memories: Vec<RecalledMemory>,
    /// Total matches server-side (0 when absent from the payload).
    #[serde(default)]
    pub total_found: usize,
    /// COG-2 / KG-3: KG associated memories at configurable depth (only present when include_associated was true)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories: Option<Vec<RecalledMemory>>,
}
542
/// Forget (delete) memories request.
///
/// Filters combine: memories may be selected by explicit ID, by tags, by
/// session, or by age. Use the [`ForgetRequest::by_ids`] / `by_tags` /
/// `by_session` constructors for the common single-filter cases.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetRequest {
    pub agent_id: String,
    /// Explicit memory IDs to delete.
    #[serde(default)]
    pub memory_ids: Vec<String>,
    /// Delete memories carrying these tags.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Delete memories belonging to this session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Delete memories created before this Unix timestamp.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub before_timestamp: Option<u64>,
}
556
557impl ForgetRequest {
558    /// Forget specific memories by ID
559    pub fn by_ids(agent_id: impl Into<String>, ids: Vec<String>) -> Self {
560        Self {
561            agent_id: agent_id.into(),
562            memory_ids: ids,
563            tags: Vec::new(),
564            session_id: None,
565            before_timestamp: None,
566        }
567    }
568
569    /// Forget memories with specific tags
570    pub fn by_tags(agent_id: impl Into<String>, tags: Vec<String>) -> Self {
571        Self {
572            agent_id: agent_id.into(),
573            memory_ids: Vec::new(),
574            tags,
575            session_id: None,
576            before_timestamp: None,
577        }
578    }
579
580    /// Forget all memories in a session
581    pub fn by_session(agent_id: impl Into<String>, session_id: impl Into<String>) -> Self {
582        Self {
583            agent_id: agent_id.into(),
584            memory_ids: Vec::new(),
585            tags: Vec::new(),
586            session_id: Some(session_id.into()),
587            before_timestamp: None,
588        }
589    }
590}
591
/// Forget response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetResponse {
    /// Number of memories the server deleted.
    pub deleted_count: u64,
}
597
/// Session start request (see [`SessionStartResponse`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartRequest {
    /// Agent the new session belongs to.
    pub agent_id: String,
    /// Arbitrary caller-supplied JSON metadata for the session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}
605
/// Session information.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    pub id: String,
    pub agent_id: String,
    /// Start time as a Unix timestamp.
    pub started_at: u64,
    /// End time as a Unix timestamp; `None` while the session is open.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ended_at: Option<u64>,
    /// Optional summary recorded when the session ends.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Cached count of memories in this session
    #[serde(default)]
    pub memory_count: usize,
}
622
/// Session end request (see [`SessionEndResponse`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndRequest {
    /// Optional closing summary to store on the session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
}
629
/// Response from `POST /v1/sessions/start`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartResponse {
    /// The newly created session.
    pub session: Session,
}
635
/// Response from `POST /v1/sessions/{id}/end`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndResponse {
    /// The session in its ended state.
    pub session: Session,
    /// Number of memories recorded during the session.
    pub memory_count: usize,
}
642
/// Request to update a memory. All fields are optional; only the fields that
/// are set are sent, leaving the rest of the memory unchanged.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateMemoryRequest {
    /// Replacement content.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// Replacement metadata.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Replacement classification.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
}
653
/// Request to update memory importance for a batch of memories.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateImportanceRequest {
    /// Memories whose importance should be overwritten.
    pub memory_ids: Vec<String>,
    /// New importance value applied to every listed memory.
    pub importance: f32,
}
660
/// DBSCAN algorithm config for adaptive consolidation (CE-6).
///
/// All fields are optional; omitted fields use server-side defaults.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidationConfig {
    /// Clustering algorithm: `"dbscan"` (default) or `"greedy"`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub algorithm: Option<String>,
    /// Minimum cluster samples for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_samples: Option<u32>,
    /// Epsilon distance parameter for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eps: Option<f32>,
}
674
/// One step in the consolidation execution log (CE-6).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsolidationLogEntry {
    /// Name of the pipeline step.
    pub step: String,
    /// Memory count entering this step.
    pub memories_before: usize,
    /// Memory count leaving this step.
    pub memories_after: usize,
    /// Wall-clock duration of the step in milliseconds.
    pub duration_ms: f64,
}
683
/// Request to consolidate memories.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidateRequest {
    /// Restrict consolidation to one memory type (stringly-typed on the wire).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<String>,
    /// Similarity threshold for merging.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub threshold: Option<f32>,
    /// When true, report what would be consolidated without changing anything.
    #[serde(default)]
    pub dry_run: bool,
    /// Optional DBSCAN algorithm configuration (CE-6).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<ConsolidationConfig>,
}
697
/// Response from consolidation (`POST /v1/memory/consolidate`).
///
/// The server returns `{"memories_removed": N, "source_memory_ids": [...], "consolidated_memory": {...}}`.
/// `consolidated_count` is mapped from `memories_removed` for backward compat.
/// `Deserialize` is hand-written (below); `Serialize` is derived.
#[derive(Debug, Clone, Serialize)]
pub struct ConsolidateResponse {
    /// Number of source memories removed (= `memories_removed` from server)
    pub consolidated_count: usize,
    /// Alias for consolidated_count
    pub removed_count: usize,
    /// IDs of source memories that were removed.
    /// NOTE(review): despite the name, this holds the *removed* source IDs
    /// (mapped from `source_memory_ids`), not newly created memories; the name
    /// is kept for backward compatibility with existing callers.
    #[serde(default)]
    pub new_memories: Vec<String>,
    /// Step-by-step consolidation log (CE-6, optional).
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub log: Vec<ConsolidationLogEntry>,
}
715
716impl<'de> serde::Deserialize<'de> for ConsolidateResponse {
717    fn deserialize<D: serde::Deserializer<'de>>(
718        deserializer: D,
719    ) -> std::result::Result<Self, D::Error> {
720        let val = serde_json::Value::deserialize(deserializer)?;
721        // Server format: {"consolidated_memory":{...}, "source_memory_ids":[...], "memories_removed": N}
722        let removed = val
723            .get("memories_removed")
724            .and_then(|v| v.as_u64())
725            .or_else(|| val.get("removed_count").and_then(|v| v.as_u64()))
726            .or_else(|| val.get("consolidated_count").and_then(|v| v.as_u64()))
727            .unwrap_or(0) as usize;
728        let source_ids: Vec<String> = val
729            .get("source_memory_ids")
730            .and_then(|v| v.as_array())
731            .map(|arr| {
732                arr.iter()
733                    .filter_map(|v| v.as_str().map(String::from))
734                    .collect()
735            })
736            .unwrap_or_default();
737        Ok(Self {
738            consolidated_count: removed,
739            removed_count: removed,
740            new_memories: source_ids,
741            log: vec![],
742        })
743    }
744}
745
746// ============================================================================
747// DX-1: Memory Import / Export
748// ============================================================================
749
/// Response from `POST /v1/import` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryImportResponse {
    /// Memories successfully imported.
    pub imported_count: usize,
    /// Memories skipped by the server.
    pub skipped_count: usize,
    /// Per-record error messages, if any.
    #[serde(default)]
    pub errors: Vec<String>,
}
758
/// Response from `GET /v1/export` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryExportResponse {
    /// Exported memory records as raw JSON values.
    pub data: Vec<serde_json::Value>,
    /// Export format name reported by the server.
    pub format: String,
    /// Number of exported records.
    pub count: usize,
}
766
767// ============================================================================
768// OBS-1: Business-Event Audit Log
769// ============================================================================
770
/// A single business-event entry from the audit log (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditEvent {
    pub id: String,
    /// Event kind (stringly-typed on the wire).
    pub event_type: String,
    /// Agent the event concerns, when applicable.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
    /// Event time as a Unix timestamp.
    pub timestamp: u64,
    /// Free-form event payload; JSON null when absent.
    #[serde(default)]
    pub details: serde_json::Value,
}
784
/// Response from `GET /v1/audit` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditListResponse {
    /// Events in this page.
    pub events: Vec<AuditEvent>,
    /// Total matching events server-side.
    pub total: usize,
    /// Pagination cursor for the next page; absent on the last page.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}
793
/// Response from `POST /v1/audit/export` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditExportResponse {
    /// Exported audit data as one encoded string (encoding per `format`).
    pub data: String,
    /// Export format name reported by the server.
    pub format: String,
    /// Number of exported events.
    pub count: usize,
}
801
/// Query parameters for the audit log (OBS-1). Every field is optional;
/// unset fields are omitted so the server applies no filter for them.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct AuditQuery {
    /// Filter by agent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    /// Filter by event kind.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub event_type: Option<String>,
    /// Earliest event time (Unix timestamp).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub from: Option<u64>,
    /// Latest event time (Unix timestamp).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<u64>,
    /// Page size.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<u32>,
    /// Pagination cursor from a previous [`AuditListResponse`].
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}
818
819// ============================================================================
820// EXT-1: External Extraction Providers
821// ============================================================================
822
/// Result from `POST /v1/extract` (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionResult {
    /// Extracted entities as raw JSON values (provider-specific shape).
    pub entities: Vec<serde_json::Value>,
    /// Provider that performed the extraction.
    pub provider: String,
    /// Model used, when the provider reports one.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Extraction duration in milliseconds.
    pub duration_ms: f64,
}
832
/// Metadata for an available extraction provider (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionProviderInfo {
    pub name: String,
    /// Whether the provider is currently usable.
    pub available: bool,
    /// Models the provider exposes; empty when not reported.
    #[serde(default)]
    pub models: Vec<String>,
}
841
/// Response from `GET /v1/extract/providers` (EXT-1).
///
/// Untagged so both a bare JSON array and a `{"providers": [...]}` wrapper
/// object deserialize; serde tries the variants in declaration order.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ExtractProvidersResponse {
    /// Bare array payload: `[{...}, ...]`.
    List(Vec<ExtractionProviderInfo>),
    /// Wrapped payload: `{"providers": [{...}, ...]}`.
    Object {
        providers: Vec<ExtractionProviderInfo>,
    },
}
851
852// ============================================================================
853// SEC-3: AES-256-GCM Encryption Key Rotation
854// ============================================================================
855
/// Request body for `POST /v1/admin/encryption/rotate-key` (SEC-3).
///
/// See [`RotateEncryptionKeyResponse`] for the resulting rotation counts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyRequest {
    /// New passphrase or 64-char hex key to rotate to.
    pub new_key: String,
    /// If set, rotate only memories in this namespace. Omit to rotate all.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
}
865
/// Response from `POST /v1/admin/encryption/rotate-key` (SEC-3).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyResponse {
    /// Number of memories rotated to the new key.
    pub rotated: usize,
    /// Number of memories left untouched.
    pub skipped: usize,
    /// Namespaces affected by the rotation; defaults to empty when absent.
    #[serde(default)]
    pub namespaces: Vec<String>,
}
874
/// Request for memory feedback (legacy endpoint; see [`LegacyFeedbackResponse`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeedbackRequest {
    /// Memory the feedback applies to.
    pub memory_id: String,
    /// Free-form feedback string (semantics defined by the server).
    pub feedback: String,
    /// Optional relevance score; omitted from the JSON body when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub relevance_score: Option<f32>,
}
883
/// Response from legacy feedback endpoint (POST /v1/agents/:id/memories/feedback)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LegacyFeedbackResponse {
    /// Server status string.
    pub status: String,
    /// Updated importance value, when the feedback changed it.
    pub updated_importance: Option<f32>,
}
890
891// ============================================================================
892// CE-2: Batch Recall / Forget Types
893// ============================================================================
894
/// Filter predicates for batch memory operations (CE-2).
///
/// All fields are optional.  For [`BatchForgetRequest`] at least one must be
/// set (server-side safety guard).
///
/// `Default` yields an empty filter with no predicates set.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BatchMemoryFilter {
    /// Restrict to memories that carry **all** listed tags.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<String>>,
    /// Minimum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_importance: Option<f32>,
    /// Maximum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_importance: Option<f32>,
    /// Only memories created at or after this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_after: Option<u64>,
    /// Only memories created before or at this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_before: Option<u64>,
    /// Restrict to a specific memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Restrict to memories from a specific session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
}
923
924impl BatchMemoryFilter {
925    /// Convenience: filter by tags.
926    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
927        self.tags = Some(tags);
928        self
929    }
930
931    /// Convenience: filter by minimum importance.
932    pub fn with_min_importance(mut self, min: f32) -> Self {
933        self.min_importance = Some(min);
934        self
935    }
936
937    /// Convenience: filter by maximum importance.
938    pub fn with_max_importance(mut self, max: f32) -> Self {
939        self.max_importance = Some(max);
940        self
941    }
942
943    /// Convenience: filter by session.
944    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
945        self.session_id = Some(session_id.into());
946        self
947    }
948}
949
/// Request body for `POST /v1/memories/recall/batch`.
///
/// Construct via [`BatchRecallRequest::new`] and chain the `with_*` builders.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallRequest {
    /// Agent whose memory namespace to search.
    pub agent_id: String,
    /// Filter predicates to apply.
    #[serde(default)]
    pub filter: BatchMemoryFilter,
    /// Maximum number of results to return (default: 100).
    #[serde(default = "default_batch_limit")]
    pub limit: usize,
}
962
/// Serde default for [`BatchRecallRequest::limit`]: 100 results.
fn default_batch_limit() -> usize {
    100
}
966
967impl BatchRecallRequest {
968    /// Create a new batch recall request for an agent.
969    pub fn new(agent_id: impl Into<String>) -> Self {
970        Self {
971            agent_id: agent_id.into(),
972            filter: BatchMemoryFilter::default(),
973            limit: 100,
974        }
975    }
976
977    /// Set filter predicates.
978    pub fn with_filter(mut self, filter: BatchMemoryFilter) -> Self {
979        self.filter = filter;
980        self
981    }
982
983    /// Set result limit.
984    pub fn with_limit(mut self, limit: usize) -> Self {
985        self.limit = limit;
986        self
987    }
988}
989
/// Response from `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallResponse {
    /// Memories that matched the filter, up to the requested limit.
    pub memories: Vec<RecalledMemory>,
    /// Total memories in the agent namespace.
    pub total: usize,
    /// Number of memories that passed the filter.
    pub filtered: usize,
}
999
/// Request body for `DELETE /v1/memories/forget/batch`.
///
/// Construct via [`BatchForgetRequest::new`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetRequest {
    /// Agent whose memory namespace to purge from.
    pub agent_id: String,
    /// Filter predicates — **at least one must be set** (server safety guard).
    pub filter: BatchMemoryFilter,
}
1008
1009impl BatchForgetRequest {
1010    /// Create a new batch forget request with the given filter.
1011    pub fn new(agent_id: impl Into<String>, filter: BatchMemoryFilter) -> Self {
1012        Self {
1013            agent_id: agent_id.into(),
1014            filter,
1015        }
1016    }
1017}
1018
/// Response from `DELETE /v1/memories/forget/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetResponse {
    /// Number of memories deleted by the batch operation.
    pub deleted_count: usize,
}
1024
1025// ============================================================================
1026// Memory Client Methods
1027// ============================================================================
1028
1029impl DakeraClient {
1030    // ========================================================================
1031    // Memory Operations
1032    // ========================================================================
1033
1034    /// Store a memory for an agent
1035    ///
1036    /// # Example
1037    ///
1038    /// ```rust,no_run
1039    /// use dakera_client::{DakeraClient, memory::StoreMemoryRequest};
1040    ///
1041    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1042    /// let client = DakeraClient::new("http://localhost:3000")?;
1043    ///
1044    /// let request = StoreMemoryRequest::new("agent-1", "The user prefers dark mode")
1045    ///     .with_importance(0.8)
1046    ///     .with_tags(vec!["preferences".to_string()]);
1047    ///
1048    /// let response = client.store_memory(request).await?;
1049    /// println!("Stored memory: {}", response.memory_id);
1050    /// # Ok(())
1051    /// # }
1052    /// ```
1053    pub async fn store_memory(&self, request: StoreMemoryRequest) -> Result<StoreMemoryResponse> {
1054        let url = format!("{}/v1/memory/store", self.base_url);
1055        let response = self.client.post(&url).json(&request).send().await?;
1056        self.handle_response(response).await
1057    }
1058
1059    /// Recall memories by semantic query
1060    ///
1061    /// # Example
1062    ///
1063    /// ```rust,no_run
1064    /// use dakera_client::{DakeraClient, memory::RecallRequest};
1065    ///
1066    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1067    /// let client = DakeraClient::new("http://localhost:3000")?;
1068    ///
1069    /// let request = RecallRequest::new("agent-1", "user preferences")
1070    ///     .with_top_k(10);
1071    ///
1072    /// let response = client.recall(request).await?;
1073    /// for memory in response.memories {
1074    ///     println!("{}: {} (score: {})", memory.id, memory.content, memory.score);
1075    /// }
1076    /// # Ok(())
1077    /// # }
1078    /// ```
1079    pub async fn recall(&self, request: RecallRequest) -> Result<RecallResponse> {
1080        let url = format!("{}/v1/memory/recall", self.base_url);
1081        let response = self.client.post(&url).json(&request).send().await?;
1082        self.handle_response(response).await
1083    }
1084
1085    /// Simple recall with just agent_id and query (convenience method)
1086    pub async fn recall_simple(
1087        &self,
1088        agent_id: &str,
1089        query: &str,
1090        top_k: usize,
1091    ) -> Result<RecallResponse> {
1092        self.recall(RecallRequest::new(agent_id, query).with_top_k(top_k))
1093            .await
1094    }
1095
1096    /// Get a specific memory by ID
1097    pub async fn get_memory(&self, memory_id: &str) -> Result<RecalledMemory> {
1098        let url = format!("{}/v1/memory/get/{}", self.base_url, memory_id);
1099        let response = self.client.get(&url).send().await?;
1100        self.handle_response(response).await
1101    }
1102
1103    /// Forget (delete) memories
1104    pub async fn forget(&self, request: ForgetRequest) -> Result<ForgetResponse> {
1105        let url = format!("{}/v1/memory/forget", self.base_url);
1106        let response = self.client.post(&url).json(&request).send().await?;
1107        self.handle_response(response).await
1108    }
1109
1110    /// Search memories with advanced filters
1111    pub async fn search_memories(&self, request: RecallRequest) -> Result<RecallResponse> {
1112        let url = format!("{}/v1/memory/search", self.base_url);
1113        let response = self.client.post(&url).json(&request).send().await?;
1114        self.handle_response(response).await
1115    }
1116
1117    /// Update an existing memory
1118    pub async fn update_memory(
1119        &self,
1120        agent_id: &str,
1121        memory_id: &str,
1122        request: UpdateMemoryRequest,
1123    ) -> Result<StoreMemoryResponse> {
1124        let url = format!(
1125            "{}/v1/agents/{}/memories/{}",
1126            self.base_url, agent_id, memory_id
1127        );
1128        let response = self.client.put(&url).json(&request).send().await?;
1129        self.handle_response(response).await
1130    }
1131
1132    /// Update importance of memories
1133    pub async fn update_importance(
1134        &self,
1135        agent_id: &str,
1136        request: UpdateImportanceRequest,
1137    ) -> Result<serde_json::Value> {
1138        let url = format!(
1139            "{}/v1/agents/{}/memories/importance",
1140            self.base_url, agent_id
1141        );
1142        let response = self.client.put(&url).json(&request).send().await?;
1143        self.handle_response(response).await
1144    }
1145
1146    /// Consolidate memories for an agent
1147    pub async fn consolidate(
1148        &self,
1149        agent_id: &str,
1150        request: ConsolidateRequest,
1151    ) -> Result<ConsolidateResponse> {
1152        // Server endpoint: POST /v1/memory/consolidate with agent_id in body
1153        let url = format!("{}/v1/memory/consolidate", self.base_url);
1154        let mut body = serde_json::to_value(&request)?;
1155        body["agent_id"] = serde_json::Value::String(agent_id.to_string());
1156        let response = self.client.post(&url).json(&body).send().await?;
1157        self.handle_response(response).await
1158    }
1159
1160    /// Submit feedback on a memory recall
1161    pub async fn memory_feedback(
1162        &self,
1163        agent_id: &str,
1164        request: FeedbackRequest,
1165    ) -> Result<LegacyFeedbackResponse> {
1166        let url = format!("{}/v1/agents/{}/memories/feedback", self.base_url, agent_id);
1167        let response = self.client.post(&url).json(&request).send().await?;
1168        self.handle_response(response).await
1169    }
1170
1171    // ========================================================================
1172    // Memory Feedback Loop — INT-1
1173    // ========================================================================
1174
1175    /// Submit upvote/downvote/flag feedback on a memory (INT-1).
1176    ///
1177    /// # Arguments
1178    /// * `memory_id` – The memory to give feedback on.
1179    /// * `agent_id` – The agent that owns the memory.
1180    /// * `signal` – [`FeedbackSignal`] value: `Upvote`, `Downvote`, or `Flag`.
1181    ///
1182    /// # Example
1183    /// ```no_run
1184    /// # use dakera_client::{DakeraClient, FeedbackSignal};
1185    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1186    /// let resp = client.feedback_memory("mem-abc", "agent-1", FeedbackSignal::Upvote).await?;
1187    /// println!("new importance: {}", resp.new_importance);
1188    /// # Ok(()) }
1189    /// ```
1190    pub async fn feedback_memory(
1191        &self,
1192        memory_id: &str,
1193        agent_id: &str,
1194        signal: FeedbackSignal,
1195    ) -> Result<FeedbackResponse> {
1196        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1197        let body = MemoryFeedbackBody {
1198            agent_id: agent_id.to_string(),
1199            signal,
1200        };
1201        let response = self.client.post(&url).json(&body).send().await?;
1202        self.handle_response(response).await
1203    }
1204
1205    /// Get the full feedback history for a memory (INT-1).
1206    pub async fn get_memory_feedback_history(
1207        &self,
1208        memory_id: &str,
1209    ) -> Result<FeedbackHistoryResponse> {
1210        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1211        let response = self.client.get(&url).send().await?;
1212        self.handle_response(response).await
1213    }
1214
1215    /// Get aggregate feedback counts and health score for an agent (INT-1).
1216    pub async fn get_agent_feedback_summary(&self, agent_id: &str) -> Result<AgentFeedbackSummary> {
1217        let url = format!("{}/v1/agents/{}/feedback/summary", self.base_url, agent_id);
1218        let response = self.client.get(&url).send().await?;
1219        self.handle_response(response).await
1220    }
1221
1222    /// Directly override a memory's importance score (INT-1).
1223    ///
1224    /// # Arguments
1225    /// * `memory_id` – The memory to update.
1226    /// * `agent_id` – The agent that owns the memory.
1227    /// * `importance` – New importance value (0.0–1.0).
1228    pub async fn patch_memory_importance(
1229        &self,
1230        memory_id: &str,
1231        agent_id: &str,
1232        importance: f32,
1233    ) -> Result<FeedbackResponse> {
1234        let url = format!("{}/v1/memories/{}/importance", self.base_url, memory_id);
1235        let body = MemoryImportancePatch {
1236            agent_id: agent_id.to_string(),
1237            importance,
1238        };
1239        let response = self.client.patch(&url).json(&body).send().await?;
1240        self.handle_response(response).await
1241    }
1242
1243    /// Get overall feedback health score for an agent (INT-1).
1244    ///
1245    /// The health score is the mean importance of all non-expired memories (0.0–1.0).
1246    /// A higher score indicates a healthier, more relevant memory store.
1247    pub async fn get_feedback_health(&self, agent_id: &str) -> Result<FeedbackHealthResponse> {
1248        let url = format!("{}/v1/feedback/health?agent_id={}", self.base_url, agent_id);
1249        let response = self.client.get(&url).send().await?;
1250        self.handle_response(response).await
1251    }
1252
1253    // ========================================================================
1254    // Memory Knowledge Graph Operations (CE-5 / SDK-9)
1255    // ========================================================================
1256
1257    /// Traverse the knowledge graph from a memory node.
1258    ///
1259    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1260    ///
1261    /// # Arguments
1262    /// * `memory_id` – Root memory ID to start traversal from.
1263    /// * `options` – Traversal options (depth, edge type filters).
1264    ///
1265    /// # Example
1266    /// ```no_run
1267    /// # use dakera_client::{DakeraClient, GraphOptions};
1268    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1269    /// let graph = client.memory_graph("mem-abc", GraphOptions::new().depth(2)).await?;
1270    /// println!("{} nodes, {} edges", graph.nodes.len(), graph.edges.len());
1271    /// # Ok(()) }
1272    /// ```
1273    pub async fn memory_graph(
1274        &self,
1275        memory_id: &str,
1276        options: GraphOptions,
1277    ) -> Result<MemoryGraph> {
1278        let mut url = format!("{}/v1/memories/{}/graph", self.base_url, memory_id);
1279        let depth = options.depth.unwrap_or(1);
1280        url.push_str(&format!("?depth={}", depth));
1281        if let Some(types) = &options.types {
1282            let type_strs: Vec<String> = types
1283                .iter()
1284                .map(|t| {
1285                    serde_json::to_value(t)
1286                        .unwrap()
1287                        .as_str()
1288                        .unwrap_or("")
1289                        .to_string()
1290                })
1291                .collect();
1292            if !type_strs.is_empty() {
1293                url.push_str(&format!("&types={}", type_strs.join(",")));
1294            }
1295        }
1296        let response = self.client.get(&url).send().await?;
1297        self.handle_response(response).await
1298    }
1299
1300    /// Find the shortest path between two memories in the knowledge graph.
1301    ///
1302    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1303    ///
1304    /// # Example
1305    /// ```no_run
1306    /// # use dakera_client::DakeraClient;
1307    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1308    /// let path = client.memory_path("mem-abc", "mem-xyz").await?;
1309    /// println!("{} hops: {:?}", path.hops, path.path);
1310    /// # Ok(()) }
1311    /// ```
1312    pub async fn memory_path(&self, source_id: &str, target_id: &str) -> Result<GraphPath> {
1313        let url = format!(
1314            "{}/v1/memories/{}/path?target={}",
1315            self.base_url,
1316            source_id,
1317            urlencoding::encode(target_id)
1318        );
1319        let response = self.client.get(&url).send().await?;
1320        self.handle_response(response).await
1321    }
1322
1323    /// Create an explicit edge between two memories.
1324    ///
1325    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1326    ///
1327    /// # Example
1328    /// ```no_run
1329    /// # use dakera_client::{DakeraClient, EdgeType};
1330    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1331    /// let resp = client.memory_link("mem-abc", "mem-xyz", EdgeType::LinkedBy).await?;
1332    /// println!("Created edge: {}", resp.edge.id);
1333    /// # Ok(()) }
1334    /// ```
1335    pub async fn memory_link(
1336        &self,
1337        source_id: &str,
1338        target_id: &str,
1339        edge_type: EdgeType,
1340    ) -> Result<GraphLinkResponse> {
1341        let url = format!("{}/v1/memories/{}/links", self.base_url, source_id);
1342        let request = GraphLinkRequest {
1343            target_id: target_id.to_string(),
1344            edge_type,
1345        };
1346        let response = self.client.post(&url).json(&request).send().await?;
1347        self.handle_response(response).await
1348    }
1349
1350    /// Export the full knowledge graph for an agent.
1351    ///
1352    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1353    ///
1354    /// # Arguments
1355    /// * `agent_id` – Agent whose graph to export.
1356    /// * `format` – Export format: `"json"` (default), `"graphml"`, or `"csv"`.
1357    pub async fn agent_graph_export(&self, agent_id: &str, format: &str) -> Result<GraphExport> {
1358        let url = format!(
1359            "{}/v1/agents/{}/graph/export?format={}",
1360            self.base_url, agent_id, format
1361        );
1362        let response = self.client.get(&url).send().await?;
1363        self.handle_response(response).await
1364    }
1365
1366    // ========================================================================
1367    // Session Operations
1368    // ========================================================================
1369
1370    /// Start a new session for an agent
1371    pub async fn start_session(&self, agent_id: &str) -> Result<Session> {
1372        let url = format!("{}/v1/sessions/start", self.base_url);
1373        let request = SessionStartRequest {
1374            agent_id: agent_id.to_string(),
1375            metadata: None,
1376        };
1377        let response = self.client.post(&url).json(&request).send().await?;
1378        let resp: SessionStartResponse = self.handle_response(response).await?;
1379        Ok(resp.session)
1380    }
1381
1382    /// Start a session with metadata
1383    pub async fn start_session_with_metadata(
1384        &self,
1385        agent_id: &str,
1386        metadata: serde_json::Value,
1387    ) -> Result<Session> {
1388        let url = format!("{}/v1/sessions/start", self.base_url);
1389        let request = SessionStartRequest {
1390            agent_id: agent_id.to_string(),
1391            metadata: Some(metadata),
1392        };
1393        let response = self.client.post(&url).json(&request).send().await?;
1394        let resp: SessionStartResponse = self.handle_response(response).await?;
1395        Ok(resp.session)
1396    }
1397
1398    /// End a session, optionally with a summary.
1399    /// Returns the session state and the total memory count at close.
1400    pub async fn end_session(
1401        &self,
1402        session_id: &str,
1403        summary: Option<String>,
1404    ) -> Result<SessionEndResponse> {
1405        let url = format!("{}/v1/sessions/{}/end", self.base_url, session_id);
1406        let request = SessionEndRequest { summary };
1407        let response = self.client.post(&url).json(&request).send().await?;
1408        self.handle_response(response).await
1409    }
1410
1411    /// Get a session by ID
1412    pub async fn get_session(&self, session_id: &str) -> Result<Session> {
1413        let url = format!("{}/v1/sessions/{}", self.base_url, session_id);
1414        let response = self.client.get(&url).send().await?;
1415        self.handle_response(response).await
1416    }
1417
1418    /// List sessions for an agent
1419    pub async fn list_sessions(&self, agent_id: &str) -> Result<Vec<Session>> {
1420        let url = format!("{}/v1/sessions?agent_id={}", self.base_url, agent_id);
1421        let response = self.client.get(&url).send().await?;
1422        self.handle_response(response).await
1423    }
1424
1425    /// Get memories in a session
1426    pub async fn session_memories(&self, session_id: &str) -> Result<RecallResponse> {
1427        let url = format!("{}/v1/sessions/{}/memories", self.base_url, session_id);
1428        let response = self.client.get(&url).send().await?;
1429        self.handle_response(response).await
1430    }
1431
1432    // ========================================================================
1433    // CE-2: Batch Recall / Forget
1434    // ========================================================================
1435
1436    /// Bulk-recall memories using filter predicates (CE-2).
1437    ///
1438    /// Uses `POST /v1/memories/recall/batch` — no embedding required.
1439    ///
1440    /// # Example
1441    ///
1442    /// ```rust,no_run
1443    /// use dakera_client::{DakeraClient, memory::{BatchRecallRequest, BatchMemoryFilter}};
1444    ///
1445    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1446    /// let client = DakeraClient::new("http://localhost:3000")?;
1447    ///
1448    /// let filter = BatchMemoryFilter::default().with_min_importance(0.7);
1449    /// let req = BatchRecallRequest::new("agent-1").with_filter(filter).with_limit(50);
1450    /// let resp = client.batch_recall(req).await?;
1451    /// println!("Found {} memories", resp.filtered);
1452    /// # Ok(())
1453    /// # }
1454    /// ```
1455    pub async fn batch_recall(&self, request: BatchRecallRequest) -> Result<BatchRecallResponse> {
1456        let url = format!("{}/v1/memories/recall/batch", self.base_url);
1457        let response = self.client.post(&url).json(&request).send().await?;
1458        self.handle_response(response).await
1459    }
1460
1461    /// Bulk-delete memories using filter predicates (CE-2).
1462    ///
1463    /// Uses `DELETE /v1/memories/forget/batch`.  The server requires at least
1464    /// one filter predicate to be set as a safety guard.
1465    ///
1466    /// # Example
1467    ///
1468    /// ```rust,no_run
1469    /// use dakera_client::{DakeraClient, memory::{BatchForgetRequest, BatchMemoryFilter}};
1470    ///
1471    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1472    /// let client = DakeraClient::new("http://localhost:3000")?;
1473    ///
1474    /// let filter = BatchMemoryFilter::default().with_min_importance(0.0).with_max_importance(0.2);
1475    /// let resp = client.batch_forget(BatchForgetRequest::new("agent-1", filter)).await?;
1476    /// println!("Deleted {} memories", resp.deleted_count);
1477    /// # Ok(())
1478    /// # }
1479    /// ```
1480    pub async fn batch_forget(&self, request: BatchForgetRequest) -> Result<BatchForgetResponse> {
1481        let url = format!("{}/v1/memories/forget/batch", self.base_url);
1482        let response = self.client.delete(&url).json(&request).send().await?;
1483        self.handle_response(response).await
1484    }
1485
1486    // ========================================================================
1487    // DX-1: Memory Import / Export
1488    // ========================================================================
1489
1490    /// Import memories from an external format (DX-1).
1491    ///
1492    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1493    ///
1494    /// ```no_run
1495    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1496    /// let client = dakera_client::DakeraClient::new("http://localhost:3000")?;
1497    /// let data = serde_json::json!([{"content": "hello", "agent_id": "agent-1"}]);
1498    /// let resp = client.import_memories(data, "jsonl", None, None).await?;
1499    /// println!("Imported {} memories", resp.imported_count);
1500    /// # Ok(())
1501    /// # }
1502    /// ```
1503    pub async fn import_memories(
1504        &self,
1505        data: serde_json::Value,
1506        format: &str,
1507        agent_id: Option<&str>,
1508        namespace: Option<&str>,
1509    ) -> Result<MemoryImportResponse> {
1510        let mut body = serde_json::json!({"data": data, "format": format});
1511        if let Some(aid) = agent_id {
1512            body["agent_id"] = serde_json::Value::String(aid.to_string());
1513        }
1514        if let Some(ns) = namespace {
1515            body["namespace"] = serde_json::Value::String(ns.to_string());
1516        }
1517        let url = format!("{}/v1/import", self.base_url);
1518        let response = self.client.post(&url).json(&body).send().await?;
1519        self.handle_response(response).await
1520    }
1521
1522    /// Export memories in a portable format (DX-1).
1523    ///
1524    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1525    pub async fn export_memories(
1526        &self,
1527        format: &str,
1528        agent_id: Option<&str>,
1529        namespace: Option<&str>,
1530        limit: Option<u32>,
1531    ) -> Result<MemoryExportResponse> {
1532        let mut params = vec![("format", format.to_string())];
1533        if let Some(aid) = agent_id {
1534            params.push(("agent_id", aid.to_string()));
1535        }
1536        if let Some(ns) = namespace {
1537            params.push(("namespace", ns.to_string()));
1538        }
1539        if let Some(l) = limit {
1540            params.push(("limit", l.to_string()));
1541        }
1542        let url = format!("{}/v1/export", self.base_url);
1543        let response = self.client.get(&url).query(&params).send().await?;
1544        self.handle_response(response).await
1545    }
1546
1547    // ========================================================================
1548    // OBS-1: Business-Event Audit Log
1549    // ========================================================================
1550
1551    /// List paginated audit log entries (OBS-1).
1552    pub async fn list_audit_events(&self, query: AuditQuery) -> Result<AuditListResponse> {
1553        let url = format!("{}/v1/audit", self.base_url);
1554        let response = self.client.get(&url).query(&query).send().await?;
1555        self.handle_response(response).await
1556    }
1557
1558    /// Stream live audit events via SSE (OBS-1).
1559    ///
1560    /// Returns a [`tokio::sync::mpsc::Receiver`] that yields [`DakeraEvent`] results.
1561    pub async fn stream_audit_events(
1562        &self,
1563        agent_id: Option<&str>,
1564        event_type: Option<&str>,
1565    ) -> Result<tokio::sync::mpsc::Receiver<Result<crate::events::DakeraEvent>>> {
1566        let mut params: Vec<(&str, String)> = Vec::new();
1567        if let Some(aid) = agent_id {
1568            params.push(("agent_id", aid.to_string()));
1569        }
1570        if let Some(et) = event_type {
1571            params.push(("event_type", et.to_string()));
1572        }
1573        let base = format!("{}/v1/audit/stream", self.base_url);
1574        let url = if params.is_empty() {
1575            base
1576        } else {
1577            let qs = params
1578                .iter()
1579                .map(|(k, v)| format!("{}={}", k, urlencoding::encode(v)))
1580                .collect::<Vec<_>>()
1581                .join("&");
1582            format!("{}?{}", base, qs)
1583        };
1584        self.stream_sse(url).await
1585    }
1586
1587    /// Bulk-export audit log entries (OBS-1).
1588    pub async fn export_audit(
1589        &self,
1590        format: &str,
1591        agent_id: Option<&str>,
1592        event_type: Option<&str>,
1593        from_ts: Option<u64>,
1594        to_ts: Option<u64>,
1595    ) -> Result<AuditExportResponse> {
1596        let mut body = serde_json::json!({"format": format});
1597        if let Some(aid) = agent_id {
1598            body["agent_id"] = serde_json::Value::String(aid.to_string());
1599        }
1600        if let Some(et) = event_type {
1601            body["event_type"] = serde_json::Value::String(et.to_string());
1602        }
1603        if let Some(f) = from_ts {
1604            body["from"] = serde_json::Value::Number(f.into());
1605        }
1606        if let Some(t) = to_ts {
1607            body["to"] = serde_json::Value::Number(t.into());
1608        }
1609        let url = format!("{}/v1/audit/export", self.base_url);
1610        let response = self.client.post(&url).json(&body).send().await?;
1611        self.handle_response(response).await
1612    }
1613
1614    // ========================================================================
1615    // EXT-1: External Extraction Providers
1616    // ========================================================================
1617
1618    /// Extract entities from text using a pluggable provider (EXT-1).
1619    ///
1620    /// Provider hierarchy: per-request > namespace default > GLiNER (bundled).
1621    /// Supported providers: `"gliner"`, `"openai"`, `"anthropic"`, `"openrouter"`, `"ollama"`.
1622    pub async fn extract_text(
1623        &self,
1624        text: &str,
1625        namespace: Option<&str>,
1626        provider: Option<&str>,
1627        model: Option<&str>,
1628    ) -> Result<ExtractionResult> {
1629        let mut body = serde_json::json!({"text": text});
1630        if let Some(ns) = namespace {
1631            body["namespace"] = serde_json::Value::String(ns.to_string());
1632        }
1633        if let Some(p) = provider {
1634            body["provider"] = serde_json::Value::String(p.to_string());
1635        }
1636        if let Some(m) = model {
1637            body["model"] = serde_json::Value::String(m.to_string());
1638        }
1639        let url = format!("{}/v1/extract", self.base_url);
1640        let response = self.client.post(&url).json(&body).send().await?;
1641        self.handle_response(response).await
1642    }
1643
1644    /// List available extraction providers and their models (EXT-1).
1645    pub async fn list_extract_providers(&self) -> Result<Vec<ExtractionProviderInfo>> {
1646        let url = format!("{}/v1/extract/providers", self.base_url);
1647        let response = self.client.get(&url).send().await?;
1648        let result: ExtractProvidersResponse = self.handle_response(response).await?;
1649        Ok(match result {
1650            ExtractProvidersResponse::List(v) => v,
1651            ExtractProvidersResponse::Object { providers } => providers,
1652        })
1653    }
1654
1655    /// Set the default extraction provider for a namespace (EXT-1).
1656    pub async fn configure_namespace_extractor(
1657        &self,
1658        namespace: &str,
1659        provider: &str,
1660        model: Option<&str>,
1661    ) -> Result<serde_json::Value> {
1662        let mut body = serde_json::json!({"provider": provider});
1663        if let Some(m) = model {
1664            body["model"] = serde_json::Value::String(m.to_string());
1665        }
1666        let url = format!(
1667            "{}/v1/namespaces/{}/extractor",
1668            self.base_url,
1669            urlencoding::encode(namespace)
1670        );
1671        let response = self.client.patch(&url).json(&body).send().await?;
1672        self.handle_response(response).await
1673    }
1674
1675    // =========================================================================
1676    // SEC-3: AES-256-GCM Encryption Key Rotation
1677    // =========================================================================
1678
1679    /// Re-encrypt all memory content blobs with a new AES-256-GCM key (SEC-3).
1680    ///
1681    /// After this call the new key is active in the running process.
1682    /// The operator must update `DAKERA_ENCRYPTION_KEY` and restart to make
1683    /// the rotation durable across restarts.
1684    ///
1685    /// Requires Admin scope.
1686    ///
1687    /// # Arguments
1688    /// * `new_key` - New passphrase or 64-char hex key.
1689    /// * `namespace` - If `Some`, rotate only this namespace. `None` rotates all.
1690    pub async fn rotate_encryption_key(
1691        &self,
1692        new_key: &str,
1693        namespace: Option<&str>,
1694    ) -> Result<RotateEncryptionKeyResponse> {
1695        let body = RotateEncryptionKeyRequest {
1696            new_key: new_key.to_string(),
1697            namespace: namespace.map(|s| s.to_string()),
1698        };
1699        let url = format!("{}/v1/admin/encryption/rotate-key", self.base_url);
1700        let response = self.client.post(&url).json(&body).send().await?;
1701        self.handle_response(response).await
1702    }
1703}