// dakera_client/memory.rs

1//! Memory-oriented client methods for Dakera AI Agent Memory Platform
2//!
3//! Provides high-level methods for storing, recalling, and managing
4//! agent memories and sessions through the Dakera API.
5
6use serde::{Deserialize, Serialize};
7
8use crate::error::Result;
9use crate::types::{
10    AgentFeedbackSummary, EdgeType, FeedbackHealthResponse, FeedbackHistoryResponse,
11    FeedbackResponse, FeedbackSignal, GraphExport, GraphLinkRequest, GraphLinkResponse,
12    GraphOptions, GraphPath, MemoryFeedbackBody, MemoryGraph, MemoryImportancePatch,
13};
14use crate::DakeraClient;
15
16// ============================================================================
17// Memory Types (client-side)
18// ============================================================================
19
/// Memory type classification.
///
/// Serialized and deserialized in lowercase (e.g. `"episodic"`) via
/// `#[serde(rename_all = "lowercase")]`. [`MemoryType::Episodic`] is the
/// `Default` variant, used wherever `#[serde(default)]` applies.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum MemoryType {
    /// Default variant; wire form `"episodic"`.
    #[default]
    Episodic,
    /// Wire form `"semantic"`.
    Semantic,
    /// Wire form `"procedural"`.
    Procedural,
    /// Wire form `"working"`.
    Working,
}
30
/// Store a memory request (body for `POST /v1/memory/store`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoreMemoryRequest {
    /// Agent that owns the memory.
    pub agent_id: String,
    /// Memory content text.
    pub content: String,
    /// Classification; defaults to [`MemoryType::Episodic`] when absent.
    #[serde(default)]
    pub memory_type: MemoryType,
    /// Importance score; defaults to `0.5` when absent (see `default_importance`).
    #[serde(default = "default_importance")]
    pub importance: f32,
    /// Free-form tags; defaults to an empty list.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Optional session association; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Optional arbitrary JSON metadata; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Optional TTL in seconds. The memory is hard-deleted after this many
    /// seconds from creation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ttl_seconds: Option<u64>,
    /// Optional explicit expiry as a Unix timestamp (seconds). Takes precedence
    /// over `ttl_seconds` when both are set. The memory is hard-deleted by the
    /// decay engine on expiry (DECAY-3).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expires_at: Option<u64>,
}
56
/// Serde default for `StoreMemoryRequest::importance`.
fn default_importance() -> f32 {
    const VALUE: f32 = 0.5;
    VALUE
}
60
61impl StoreMemoryRequest {
62    /// Create a new store memory request
63    pub fn new(agent_id: impl Into<String>, content: impl Into<String>) -> Self {
64        Self {
65            agent_id: agent_id.into(),
66            content: content.into(),
67            memory_type: MemoryType::default(),
68            importance: 0.5,
69            tags: Vec::new(),
70            session_id: None,
71            metadata: None,
72            ttl_seconds: None,
73            expires_at: None,
74        }
75    }
76
77    /// Set memory type
78    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
79        self.memory_type = memory_type;
80        self
81    }
82
83    /// Set importance score
84    pub fn with_importance(mut self, importance: f32) -> Self {
85        self.importance = importance.clamp(0.0, 1.0);
86        self
87    }
88
89    /// Set tags
90    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
91        self.tags = tags;
92        self
93    }
94
95    /// Set session ID
96    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
97        self.session_id = Some(session_id.into());
98        self
99    }
100
101    /// Set metadata
102    pub fn with_metadata(mut self, metadata: serde_json::Value) -> Self {
103        self.metadata = Some(metadata);
104        self
105    }
106
107    /// Set TTL in seconds. The memory is hard-deleted after this many seconds
108    /// from creation.
109    pub fn with_ttl(mut self, ttl_seconds: u64) -> Self {
110        self.ttl_seconds = Some(ttl_seconds);
111        self
112    }
113
114    /// Set an explicit expiry Unix timestamp (seconds). Takes precedence over
115    /// `ttl_seconds` when both are set (DECAY-3).
116    pub fn with_expires_at(mut self, expires_at: u64) -> Self {
117        self.expires_at = Some(expires_at);
118        self
119    }
120}
121
/// Stored memory response from `POST /v1/memory/store`.
///
/// The server wraps the memory in a nested `memory` object:
/// `{"memory": {"id": "...", "agent_id": "...", ...}, "embedding_time_ms": N}`.
/// The `memory_id` and `agent_id` fields are convenience accessors mapped from
/// `memory.id` and `memory.agent_id` respectively.
///
/// `Deserialize` is hand-written (see the impl below) so both the nested
/// server format and a flat legacy/mock format are accepted.
#[derive(Debug, Clone, Serialize)]
pub struct StoreMemoryResponse {
    /// Memory ID (mapped from `memory.id`)
    pub memory_id: String,
    /// Agent ID (mapped from `memory.agent_id`, empty string when absent)
    pub agent_id: String,
    /// Namespace (mapped from `memory.namespace`, defaults to `"default"`)
    pub namespace: String,
    /// Embedding latency in milliseconds; `None` in the legacy format
    pub embedding_time_ms: Option<u64>,
}
139
140impl<'de> serde::Deserialize<'de> for StoreMemoryResponse {
141    fn deserialize<D: serde::Deserializer<'de>>(
142        deserializer: D,
143    ) -> std::result::Result<Self, D::Error> {
144        use serde::de::Error;
145        let val = serde_json::Value::deserialize(deserializer)?;
146
147        // Server response: {"memory": {"id":"...","agent_id":"...",...}, "embedding_time_ms": N}
148        if let Some(memory) = val.get("memory") {
149            let memory_id = memory
150                .get("id")
151                .and_then(|v| v.as_str())
152                .ok_or_else(|| D::Error::missing_field("memory.id"))?
153                .to_string();
154            let agent_id = memory
155                .get("agent_id")
156                .and_then(|v| v.as_str())
157                .unwrap_or("")
158                .to_string();
159            let namespace = memory
160                .get("namespace")
161                .and_then(|v| v.as_str())
162                .unwrap_or("default")
163                .to_string();
164            let embedding_time_ms = val.get("embedding_time_ms").and_then(|v| v.as_u64());
165            return Ok(Self {
166                memory_id,
167                agent_id,
168                namespace,
169                embedding_time_ms,
170            });
171        }
172
173        // Legacy / mock format: {"memory_id":"...","agent_id":"...","namespace":"..."}
174        let memory_id = val
175            .get("memory_id")
176            .and_then(|v| v.as_str())
177            .ok_or_else(|| D::Error::missing_field("memory_id"))?
178            .to_string();
179        let agent_id = val
180            .get("agent_id")
181            .and_then(|v| v.as_str())
182            .unwrap_or("")
183            .to_string();
184        let namespace = val
185            .get("namespace")
186            .and_then(|v| v.as_str())
187            .unwrap_or("default")
188            .to_string();
189        Ok(Self {
190            memory_id,
191            agent_id,
192            namespace,
193            embedding_time_ms: None,
194        })
195    }
196}
197
/// Retrieval routing mode for recall and search (CE-10).
///
/// Controls which retrieval index the server uses. `Auto` (default) lets the
/// server pick the best strategy based on the query.
///
/// Serialized in `snake_case`: `"auto"`, `"vector"`, `"bm25"`, `"hybrid"`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum RoutingMode {
    /// Server picks the best strategy (default).
    Auto,
    /// Force ANN vector search (HNSW).
    Vector,
    /// Force BM25 full-text search.
    Bm25,
    /// Fuse ANN and BM25 scores (RRF).
    Hybrid,
}
214
/// Recall memories request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallRequest {
    /// Agent whose memories to search.
    pub agent_id: String,
    /// Query text to recall against.
    pub query: String,
    /// Number of results to return; defaults to 5 (see `default_top_k`).
    #[serde(default = "default_top_k")]
    pub top_k: usize,
    /// Optional memory-type filter; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Minimum importance threshold; defaults to 0.0 (no threshold).
    #[serde(default)]
    pub min_importance: f32,
    /// Optional session filter; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Tag filter; defaults to empty (no tag filtering).
    #[serde(default)]
    pub tags: Vec<String>,
    /// COG-2: traverse KG depth-1 from recalled memories and include
    /// associatively linked memories in the response (default: false).
    /// Skipped during serialization while false.
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub include_associated: bool,
    /// COG-2: max associated memories to return (default: 10, max: 10)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_cap: Option<u32>,
    /// KG-3: KG traversal depth 1–3 (default: 1); requires include_associated
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_depth: Option<u8>,
    /// KG-3: minimum edge weight for KG traversal (default: 0.0)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories_min_weight: Option<f32>,
    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub since: Option<String>,
    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub until: Option<String>,
    /// CE-10: retrieval routing mode. `None` uses the server default (`auto`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub routing: Option<RoutingMode>,
}
253
/// Serde default for `RecallRequest::top_k`.
fn default_top_k() -> usize {
    const VALUE: usize = 5;
    VALUE
}
257
258impl RecallRequest {
259    /// Create a new recall request
260    pub fn new(agent_id: impl Into<String>, query: impl Into<String>) -> Self {
261        Self {
262            agent_id: agent_id.into(),
263            query: query.into(),
264            top_k: 5,
265            memory_type: None,
266            min_importance: 0.0,
267            session_id: None,
268            tags: Vec::new(),
269            include_associated: false,
270            associated_memories_cap: None,
271            associated_memories_depth: None,
272            associated_memories_min_weight: None,
273            since: None,
274            until: None,
275            routing: None,
276        }
277    }
278
279    /// Set number of results
280    pub fn with_top_k(mut self, top_k: usize) -> Self {
281        self.top_k = top_k;
282        self
283    }
284
285    /// Filter by memory type
286    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
287        self.memory_type = Some(memory_type);
288        self
289    }
290
291    /// Set minimum importance threshold
292    pub fn with_min_importance(mut self, min: f32) -> Self {
293        self.min_importance = min;
294        self
295    }
296
297    /// Filter by session
298    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
299        self.session_id = Some(session_id.into());
300        self
301    }
302
303    /// Filter by tags
304    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
305        self.tags = tags;
306        self
307    }
308
309    /// COG-2: include KG depth-1 associated memories in the response
310    pub fn with_associated(mut self) -> Self {
311        self.include_associated = true;
312        self
313    }
314
315    /// COG-2: set max associated memories cap (default: 10, max: 10)
316    pub fn with_associated_cap(mut self, cap: u32) -> Self {
317        self.include_associated = true;
318        self.associated_memories_cap = Some(cap);
319        self
320    }
321
322    /// CE-7: only recall memories created at or after this ISO-8601 timestamp
323    pub fn with_since(mut self, since: impl Into<String>) -> Self {
324        self.since = Some(since.into());
325        self
326    }
327
328    /// CE-7: only recall memories created at or before this ISO-8601 timestamp
329    pub fn with_until(mut self, until: impl Into<String>) -> Self {
330        self.until = Some(until.into());
331        self
332    }
333
334    /// CE-10: set retrieval routing mode
335    pub fn with_routing(mut self, routing: RoutingMode) -> Self {
336        self.routing = Some(routing);
337        self
338    }
339
340    /// KG-3: set KG traversal depth (1–3, default: 1); implies include_associated
341    pub fn with_associated_depth(mut self, depth: u8) -> Self {
342        self.include_associated = true;
343        self.associated_memories_depth = Some(depth);
344        self
345    }
346
347    /// KG-3: set minimum edge weight for KG traversal (default: 0.0)
348    pub fn with_associated_min_weight(mut self, weight: f32) -> Self {
349        self.associated_memories_min_weight = Some(weight);
350        self
351    }
352}
353
/// A recalled memory
///
/// Deserialization is hand-written (see the impl below) to accept both the
/// wrapped recall format and the flat memory-get format.
#[derive(Debug, Clone, Serialize)]
pub struct RecalledMemory {
    /// Memory ID.
    pub id: String,
    /// Memory content text.
    pub content: String,
    /// Memory classification.
    pub memory_type: MemoryType,
    /// Importance score.
    pub importance: f32,
    /// Relevance score from recall (0.0 when absent from the response).
    pub score: f32,
    // NOTE(review): `#[serde(default)]` only affects derived Deserialize; with
    // only `Serialize` derived here it has no effect.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Session the memory belongs to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Arbitrary JSON metadata, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Creation time (presumably Unix seconds — confirm against server).
    pub created_at: u64,
    /// Last access time (same unit as `created_at`).
    pub last_accessed_at: u64,
    /// Number of times the memory has been accessed.
    pub access_count: u32,
    /// KG-3: hop depth at which this memory was found (only set on associated memories)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub depth: Option<u8>,
}
375
376impl<'de> serde::Deserialize<'de> for RecalledMemory {
377    fn deserialize<D: serde::Deserializer<'de>>(
378        deserializer: D,
379    ) -> std::result::Result<Self, D::Error> {
380        use serde::de::Error as _;
381        let val = serde_json::Value::deserialize(deserializer)?;
382
383        // Server wraps recall results as {memory:{...}, score, weighted_score, smart_score}.
384        // Fall back to flat format for direct memory-get responses.
385        let score = val
386            .get("score")
387            .and_then(|v| v.as_f64())
388            .or_else(|| val.get("weighted_score").and_then(|v| v.as_f64()))
389            .unwrap_or(0.0) as f32;
390
391        let mem = val.get("memory").unwrap_or(&val);
392
393        let id = mem
394            .get("id")
395            .and_then(|v| v.as_str())
396            .ok_or_else(|| D::Error::missing_field("id"))?
397            .to_string();
398        let content = mem
399            .get("content")
400            .and_then(|v| v.as_str())
401            .ok_or_else(|| D::Error::missing_field("content"))?
402            .to_string();
403        let memory_type: MemoryType = mem
404            .get("memory_type")
405            .and_then(|v| serde_json::from_value(v.clone()).ok())
406            .unwrap_or(MemoryType::Episodic);
407        let importance = mem
408            .get("importance")
409            .and_then(|v| v.as_f64())
410            .unwrap_or(0.5) as f32;
411        let tags: Vec<String> = mem
412            .get("tags")
413            .and_then(|v| serde_json::from_value(v.clone()).ok())
414            .unwrap_or_default();
415        let session_id = mem
416            .get("session_id")
417            .and_then(|v| v.as_str())
418            .map(String::from);
419        let metadata = mem.get("metadata").cloned().filter(|v| !v.is_null());
420        let created_at = mem.get("created_at").and_then(|v| v.as_u64()).unwrap_or(0);
421        let last_accessed_at = mem
422            .get("last_accessed_at")
423            .and_then(|v| v.as_u64())
424            .unwrap_or(0);
425        let access_count = mem
426            .get("access_count")
427            .and_then(|v| v.as_u64())
428            .unwrap_or(0) as u32;
429        let depth = mem.get("depth").and_then(|v| v.as_u64()).map(|v| v as u8);
430
431        Ok(Self {
432            id,
433            content,
434            memory_type,
435            importance,
436            score,
437            tags,
438            session_id,
439            metadata,
440            created_at,
441            last_accessed_at,
442            access_count,
443            depth,
444        })
445    }
446}
447
/// Recall response
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallResponse {
    /// Recalled memories, as returned by the server.
    pub memories: Vec<RecalledMemory>,
    /// Total matches found (0 when the field is absent from the response).
    #[serde(default)]
    pub total_found: usize,
    /// COG-2 / KG-3: KG associated memories at configurable depth (only present when include_associated was true)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub associated_memories: Option<Vec<RecalledMemory>>,
}
458
/// Forget (delete) memories request
///
/// How multiple set filters combine is decided server-side — confirm against
/// the API before relying on combined filters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetRequest {
    /// Agent whose memories to delete.
    pub agent_id: String,
    /// Specific memory IDs to delete; empty when not filtering by ID.
    #[serde(default)]
    pub memory_ids: Vec<String>,
    /// Tag filter; empty when not filtering by tag.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Session filter; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Only delete memories created before this Unix timestamp; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub before_timestamp: Option<u64>,
}
472
473impl ForgetRequest {
474    /// Forget specific memories by ID
475    pub fn by_ids(agent_id: impl Into<String>, ids: Vec<String>) -> Self {
476        Self {
477            agent_id: agent_id.into(),
478            memory_ids: ids,
479            tags: Vec::new(),
480            session_id: None,
481            before_timestamp: None,
482        }
483    }
484
485    /// Forget memories with specific tags
486    pub fn by_tags(agent_id: impl Into<String>, tags: Vec<String>) -> Self {
487        Self {
488            agent_id: agent_id.into(),
489            memory_ids: Vec::new(),
490            tags,
491            session_id: None,
492            before_timestamp: None,
493        }
494    }
495
496    /// Forget all memories in a session
497    pub fn by_session(agent_id: impl Into<String>, session_id: impl Into<String>) -> Self {
498        Self {
499            agent_id: agent_id.into(),
500            memory_ids: Vec::new(),
501            tags: Vec::new(),
502            session_id: Some(session_id.into()),
503            before_timestamp: None,
504        }
505    }
506}
507
/// Forget response
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetResponse {
    /// Number of memories deleted by the forget operation.
    pub deleted_count: u64,
}
513
/// Session start request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartRequest {
    /// Agent the new session belongs to.
    pub agent_id: String,
    /// Optional arbitrary session metadata; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}
521
/// Session information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    /// Session ID.
    pub id: String,
    /// Owning agent.
    pub agent_id: String,
    /// Session start time (presumably Unix seconds — confirm against server).
    pub started_at: u64,
    /// Session end time; `None` while the session is still open.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ended_at: Option<u64>,
    /// Optional session summary (e.g. supplied when ending the session).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
    /// Optional arbitrary metadata.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Cached count of memories in this session
    #[serde(default)]
    pub memory_count: usize,
}
538
/// Session end request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndRequest {
    /// Optional summary to record on the ended session; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
}
545
/// Response from `POST /v1/sessions/start`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartResponse {
    /// The newly started session.
    pub session: Session,
}
551
/// Response from `POST /v1/sessions/{id}/end`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndResponse {
    /// The ended session.
    pub session: Session,
    /// Number of memories recorded in the session.
    pub memory_count: usize,
}
558
/// Request to update a memory
///
/// All fields are optional; only set fields are sent (and presumably only
/// those are updated server-side — confirm against the API).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateMemoryRequest {
    /// Replacement content text.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// Replacement metadata.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Replacement memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
}
569
/// Request to update memory importance
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateImportanceRequest {
    /// Memories whose importance to update.
    pub memory_ids: Vec<String>,
    /// New importance score to apply to every listed memory.
    pub importance: f32,
}
576
/// DBSCAN algorithm config for adaptive consolidation (CE-6).
///
/// All fields are optional; unset fields are omitted from JSON and fall back
/// to server defaults.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidationConfig {
    /// Clustering algorithm: `"dbscan"` (default) or `"greedy"`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub algorithm: Option<String>,
    /// Minimum cluster samples for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_samples: Option<u32>,
    /// Epsilon distance parameter for DBSCAN.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eps: Option<f32>,
}
590
/// One step in the consolidation execution log (CE-6).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsolidationLogEntry {
    /// Name of the consolidation step.
    pub step: String,
    /// Memory count before this step ran.
    pub memories_before: usize,
    /// Memory count after this step ran.
    pub memories_after: usize,
    /// Wall-clock duration of the step in milliseconds.
    pub duration_ms: f64,
}
599
/// Request to consolidate memories
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ConsolidateRequest {
    /// Optional memory-type filter (free-form string, unlike the typed
    /// filters elsewhere); omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<String>,
    /// Optional similarity threshold; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub threshold: Option<f32>,
    /// When true, report what would be consolidated without changing anything.
    #[serde(default)]
    pub dry_run: bool,
    /// Optional DBSCAN algorithm configuration (CE-6).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<ConsolidationConfig>,
}
613
/// Response from consolidation (`POST /v1/memory/consolidate`).
///
/// The server returns `{"memories_removed": N, "source_memory_ids": [...], "consolidated_memory": {...}}`.
/// `consolidated_count` is mapped from `memories_removed` for backward compat.
///
/// Deserialization is hand-written (see the impl below).
#[derive(Debug, Clone, Serialize)]
pub struct ConsolidateResponse {
    /// Number of source memories removed (= `memories_removed` from server)
    pub consolidated_count: usize,
    /// Alias for consolidated_count
    pub removed_count: usize,
    /// IDs of source memories that were removed.
    /// NOTE(review): name says "new" but this carries `source_memory_ids`;
    /// kept for backward compatibility with existing callers.
    #[serde(default)]
    pub new_memories: Vec<String>,
    /// Step-by-step consolidation log (CE-6, optional).
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub log: Vec<ConsolidationLogEntry>,
}
631
632impl<'de> serde::Deserialize<'de> for ConsolidateResponse {
633    fn deserialize<D: serde::Deserializer<'de>>(
634        deserializer: D,
635    ) -> std::result::Result<Self, D::Error> {
636        let val = serde_json::Value::deserialize(deserializer)?;
637        // Server format: {"consolidated_memory":{...}, "source_memory_ids":[...], "memories_removed": N}
638        let removed = val
639            .get("memories_removed")
640            .and_then(|v| v.as_u64())
641            .or_else(|| val.get("removed_count").and_then(|v| v.as_u64()))
642            .or_else(|| val.get("consolidated_count").and_then(|v| v.as_u64()))
643            .unwrap_or(0) as usize;
644        let source_ids: Vec<String> = val
645            .get("source_memory_ids")
646            .and_then(|v| v.as_array())
647            .map(|arr| {
648                arr.iter()
649                    .filter_map(|v| v.as_str().map(String::from))
650                    .collect()
651            })
652            .unwrap_or_default();
653        Ok(Self {
654            consolidated_count: removed,
655            removed_count: removed,
656            new_memories: source_ids,
657            log: vec![],
658        })
659    }
660}
661
662// ============================================================================
663// DX-1: Memory Import / Export
664// ============================================================================
665
/// Response from `POST /v1/import` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryImportResponse {
    /// Number of memories successfully imported.
    pub imported_count: usize,
    /// Number of entries skipped.
    pub skipped_count: usize,
    /// Per-entry error messages; empty when the field is absent.
    #[serde(default)]
    pub errors: Vec<String>,
}
674
/// Response from `GET /v1/export` (DX-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryExportResponse {
    /// Exported memory records as raw JSON values.
    pub data: Vec<serde_json::Value>,
    /// Export format identifier reported by the server.
    pub format: String,
    /// Number of exported records.
    pub count: usize,
}
682
683// ============================================================================
684// OBS-1: Business-Event Audit Log
685// ============================================================================
686
/// A single business-event entry from the audit log (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditEvent {
    /// Event ID.
    pub id: String,
    /// Event type identifier (free-form string).
    pub event_type: String,
    /// Agent the event relates to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    /// Namespace the event relates to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
    /// Event time (presumably Unix seconds — confirm against server).
    pub timestamp: u64,
    /// Arbitrary event payload; JSON `null` when absent.
    #[serde(default)]
    pub details: serde_json::Value,
}
700
/// Response from `GET /v1/audit` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditListResponse {
    /// Events in this page.
    pub events: Vec<AuditEvent>,
    /// Total events matching the query.
    pub total: usize,
    /// Opaque cursor for fetching the next page; `None` on the last page.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}
709
/// Response from `POST /v1/audit/export` (OBS-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditExportResponse {
    /// Exported audit data as a single string (encoding depends on `format`).
    pub data: String,
    /// Export format identifier reported by the server.
    pub format: String,
    /// Number of exported events.
    pub count: usize,
}
717
/// Query parameters for the audit log (OBS-1).
///
/// All fields are optional; unset fields are omitted from the request.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct AuditQuery {
    /// Restrict to events for one agent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_id: Option<String>,
    /// Restrict to one event type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub event_type: Option<String>,
    /// Lower time bound (same unit as `AuditEvent::timestamp`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub from: Option<u64>,
    /// Upper time bound (same unit as `AuditEvent::timestamp`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<u64>,
    /// Maximum number of events per page.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<u32>,
    /// Pagination cursor from a previous `AuditListResponse`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cursor: Option<String>,
}
734
735// ============================================================================
736// EXT-1: External Extraction Providers
737// ============================================================================
738
/// Result from `POST /v1/extract` (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionResult {
    /// Extracted entities as raw JSON values (schema is provider-specific).
    pub entities: Vec<serde_json::Value>,
    /// Name of the extraction provider that produced the result.
    pub provider: String,
    /// Model used by the provider, when reported.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Extraction latency in milliseconds.
    pub duration_ms: f64,
}
748
/// Metadata for an available extraction provider (EXT-1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExtractionProviderInfo {
    /// Provider name.
    pub name: String,
    /// Whether the provider is currently available.
    pub available: bool,
    /// Models offered by the provider; empty when the field is absent.
    #[serde(default)]
    pub models: Vec<String>,
}
757
/// Response from `GET /v1/extract/providers` (EXT-1).
///
/// `#[serde(untagged)]` lets this accept either wire shape: a bare JSON array
/// of providers, or an object with a `providers` array.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ExtractProvidersResponse {
    /// Bare-array form: `[{...}, ...]`.
    List(Vec<ExtractionProviderInfo>),
    /// Object form: `{"providers": [{...}, ...]}`.
    Object {
        providers: Vec<ExtractionProviderInfo>,
    },
}
767
768// ============================================================================
769// SEC-3: AES-256-GCM Encryption Key Rotation
770// ============================================================================
771
/// Request body for `POST /v1/admin/encryption/rotate-key` (SEC-3).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyRequest {
    /// New passphrase or 64-char hex key to rotate to.
    pub new_key: String,
    /// If set, rotate only memories in this namespace. Omit to rotate all.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
}
781
/// Response from `POST /v1/admin/encryption/rotate-key` (SEC-3).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RotateEncryptionKeyResponse {
    /// Number of memories re-encrypted under the new key.
    pub rotated: usize,
    /// Number of memories skipped.
    pub skipped: usize,
    /// Namespaces touched by the rotation; empty when the field is absent.
    #[serde(default)]
    pub namespaces: Vec<String>,
}
790
/// Request for memory feedback
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeedbackRequest {
    /// Memory the feedback applies to.
    pub memory_id: String,
    /// Feedback value (free-form string; accepted values are server-defined).
    pub feedback: String,
    /// Optional relevance score; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub relevance_score: Option<f32>,
}
799
/// Response from legacy feedback endpoint (POST /v1/agents/:id/memories/feedback)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LegacyFeedbackResponse {
    /// Outcome status string reported by the server.
    pub status: String,
    /// New importance after applying feedback, when the server reports one.
    pub updated_importance: Option<f32>,
}
806
807// ============================================================================
808// CE-2: Batch Recall / Forget Types
809// ============================================================================
810
/// Filter predicates for batch memory operations (CE-2).
///
/// All fields are optional.  For [`BatchForgetRequest`] at least one must be
/// set (server-side safety guard).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BatchMemoryFilter {
    /// Restrict to memories that carry **all** listed tags.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<String>>,
    /// Minimum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_importance: Option<f32>,
    /// Maximum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_importance: Option<f32>,
    /// Only memories created at or after this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_after: Option<u64>,
    /// Only memories created before or at this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_before: Option<u64>,
    /// Restrict to a specific memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Restrict to memories from a specific session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
}
839
840impl BatchMemoryFilter {
841    /// Convenience: filter by tags.
842    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
843        self.tags = Some(tags);
844        self
845    }
846
847    /// Convenience: filter by minimum importance.
848    pub fn with_min_importance(mut self, min: f32) -> Self {
849        self.min_importance = Some(min);
850        self
851    }
852
853    /// Convenience: filter by maximum importance.
854    pub fn with_max_importance(mut self, max: f32) -> Self {
855        self.max_importance = Some(max);
856        self
857    }
858
859    /// Convenience: filter by session.
860    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
861        self.session_id = Some(session_id.into());
862        self
863    }
864}
865
/// Request body for `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallRequest {
    /// Agent whose memory namespace to search.
    pub agent_id: String,
    /// Filter predicates to apply.
    #[serde(default)]
    pub filter: BatchMemoryFilter,
    /// Maximum number of results to return (default: 100).
    #[serde(default = "default_batch_limit")]
    pub limit: usize,
}
878
/// Serde default for `BatchRecallRequest::limit`.
fn default_batch_limit() -> usize {
    const VALUE: usize = 100;
    VALUE
}
882
883impl BatchRecallRequest {
884    /// Create a new batch recall request for an agent.
885    pub fn new(agent_id: impl Into<String>) -> Self {
886        Self {
887            agent_id: agent_id.into(),
888            filter: BatchMemoryFilter::default(),
889            limit: 100,
890        }
891    }
892
893    /// Set filter predicates.
894    pub fn with_filter(mut self, filter: BatchMemoryFilter) -> Self {
895        self.filter = filter;
896        self
897    }
898
899    /// Set result limit.
900    pub fn with_limit(mut self, limit: usize) -> Self {
901        self.limit = limit;
902        self
903    }
904}
905
/// Response from `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallResponse {
    /// Memories that matched the filter, bounded by the request's `limit`.
    pub memories: Vec<RecalledMemory>,
    /// Total memories in the agent namespace.
    pub total: usize,
    /// Number of memories that passed the filter (may exceed
    /// `memories.len()` when truncated by the limit).
    pub filtered: usize,
}
915
/// Request body for `DELETE /v1/memories/forget/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetRequest {
    /// Agent whose memory namespace to purge from.
    pub agent_id: String,
    /// Filter predicates — **at least one must be set** (server safety guard);
    /// an empty filter would otherwise delete the whole namespace.
    pub filter: BatchMemoryFilter,
}
924
925impl BatchForgetRequest {
926    /// Create a new batch forget request with the given filter.
927    pub fn new(agent_id: impl Into<String>, filter: BatchMemoryFilter) -> Self {
928        Self {
929            agent_id: agent_id.into(),
930            filter,
931        }
932    }
933}
934
/// Response from `DELETE /v1/memories/forget/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetResponse {
    /// Number of memories deleted by the batch operation.
    pub deleted_count: usize,
}
940
941// ============================================================================
942// Memory Client Methods
943// ============================================================================
944
945impl DakeraClient {
946    // ========================================================================
947    // Memory Operations
948    // ========================================================================
949
950    /// Store a memory for an agent
951    ///
952    /// # Example
953    ///
954    /// ```rust,no_run
955    /// use dakera_client::{DakeraClient, memory::StoreMemoryRequest};
956    ///
957    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
958    /// let client = DakeraClient::new("http://localhost:3000")?;
959    ///
960    /// let request = StoreMemoryRequest::new("agent-1", "The user prefers dark mode")
961    ///     .with_importance(0.8)
962    ///     .with_tags(vec!["preferences".to_string()]);
963    ///
964    /// let response = client.store_memory(request).await?;
965    /// println!("Stored memory: {}", response.memory_id);
966    /// # Ok(())
967    /// # }
968    /// ```
969    pub async fn store_memory(&self, request: StoreMemoryRequest) -> Result<StoreMemoryResponse> {
970        let url = format!("{}/v1/memory/store", self.base_url);
971        let response = self.client.post(&url).json(&request).send().await?;
972        self.handle_response(response).await
973    }
974
975    /// Recall memories by semantic query
976    ///
977    /// # Example
978    ///
979    /// ```rust,no_run
980    /// use dakera_client::{DakeraClient, memory::RecallRequest};
981    ///
982    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
983    /// let client = DakeraClient::new("http://localhost:3000")?;
984    ///
985    /// let request = RecallRequest::new("agent-1", "user preferences")
986    ///     .with_top_k(10);
987    ///
988    /// let response = client.recall(request).await?;
989    /// for memory in response.memories {
990    ///     println!("{}: {} (score: {})", memory.id, memory.content, memory.score);
991    /// }
992    /// # Ok(())
993    /// # }
994    /// ```
995    pub async fn recall(&self, request: RecallRequest) -> Result<RecallResponse> {
996        let url = format!("{}/v1/memory/recall", self.base_url);
997        let response = self.client.post(&url).json(&request).send().await?;
998        self.handle_response(response).await
999    }
1000
1001    /// Simple recall with just agent_id and query (convenience method)
1002    pub async fn recall_simple(
1003        &self,
1004        agent_id: &str,
1005        query: &str,
1006        top_k: usize,
1007    ) -> Result<RecallResponse> {
1008        self.recall(RecallRequest::new(agent_id, query).with_top_k(top_k))
1009            .await
1010    }
1011
1012    /// Get a specific memory by ID
1013    pub async fn get_memory(&self, memory_id: &str) -> Result<RecalledMemory> {
1014        let url = format!("{}/v1/memory/get/{}", self.base_url, memory_id);
1015        let response = self.client.get(&url).send().await?;
1016        self.handle_response(response).await
1017    }
1018
1019    /// Forget (delete) memories
1020    pub async fn forget(&self, request: ForgetRequest) -> Result<ForgetResponse> {
1021        let url = format!("{}/v1/memory/forget", self.base_url);
1022        let response = self.client.post(&url).json(&request).send().await?;
1023        self.handle_response(response).await
1024    }
1025
1026    /// Search memories with advanced filters
1027    pub async fn search_memories(&self, request: RecallRequest) -> Result<RecallResponse> {
1028        let url = format!("{}/v1/memory/search", self.base_url);
1029        let response = self.client.post(&url).json(&request).send().await?;
1030        self.handle_response(response).await
1031    }
1032
1033    /// Update an existing memory
1034    pub async fn update_memory(
1035        &self,
1036        agent_id: &str,
1037        memory_id: &str,
1038        request: UpdateMemoryRequest,
1039    ) -> Result<StoreMemoryResponse> {
1040        let url = format!(
1041            "{}/v1/agents/{}/memories/{}",
1042            self.base_url, agent_id, memory_id
1043        );
1044        let response = self.client.put(&url).json(&request).send().await?;
1045        self.handle_response(response).await
1046    }
1047
1048    /// Update importance of memories
1049    pub async fn update_importance(
1050        &self,
1051        agent_id: &str,
1052        request: UpdateImportanceRequest,
1053    ) -> Result<serde_json::Value> {
1054        let url = format!(
1055            "{}/v1/agents/{}/memories/importance",
1056            self.base_url, agent_id
1057        );
1058        let response = self.client.put(&url).json(&request).send().await?;
1059        self.handle_response(response).await
1060    }
1061
1062    /// Consolidate memories for an agent
1063    pub async fn consolidate(
1064        &self,
1065        agent_id: &str,
1066        request: ConsolidateRequest,
1067    ) -> Result<ConsolidateResponse> {
1068        // Server endpoint: POST /v1/memory/consolidate with agent_id in body
1069        let url = format!("{}/v1/memory/consolidate", self.base_url);
1070        let mut body = serde_json::to_value(&request)?;
1071        body["agent_id"] = serde_json::Value::String(agent_id.to_string());
1072        let response = self.client.post(&url).json(&body).send().await?;
1073        self.handle_response(response).await
1074    }
1075
1076    /// Submit feedback on a memory recall
1077    pub async fn memory_feedback(
1078        &self,
1079        agent_id: &str,
1080        request: FeedbackRequest,
1081    ) -> Result<LegacyFeedbackResponse> {
1082        let url = format!("{}/v1/agents/{}/memories/feedback", self.base_url, agent_id);
1083        let response = self.client.post(&url).json(&request).send().await?;
1084        self.handle_response(response).await
1085    }
1086
1087    // ========================================================================
1088    // Memory Feedback Loop — INT-1
1089    // ========================================================================
1090
1091    /// Submit upvote/downvote/flag feedback on a memory (INT-1).
1092    ///
1093    /// # Arguments
1094    /// * `memory_id` – The memory to give feedback on.
1095    /// * `agent_id` – The agent that owns the memory.
1096    /// * `signal` – [`FeedbackSignal`] value: `Upvote`, `Downvote`, or `Flag`.
1097    ///
1098    /// # Example
1099    /// ```no_run
1100    /// # use dakera_client::{DakeraClient, FeedbackSignal};
1101    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1102    /// let resp = client.feedback_memory("mem-abc", "agent-1", FeedbackSignal::Upvote).await?;
1103    /// println!("new importance: {}", resp.new_importance);
1104    /// # Ok(()) }
1105    /// ```
1106    pub async fn feedback_memory(
1107        &self,
1108        memory_id: &str,
1109        agent_id: &str,
1110        signal: FeedbackSignal,
1111    ) -> Result<FeedbackResponse> {
1112        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1113        let body = MemoryFeedbackBody {
1114            agent_id: agent_id.to_string(),
1115            signal,
1116        };
1117        let response = self.client.post(&url).json(&body).send().await?;
1118        self.handle_response(response).await
1119    }
1120
1121    /// Get the full feedback history for a memory (INT-1).
1122    pub async fn get_memory_feedback_history(
1123        &self,
1124        memory_id: &str,
1125    ) -> Result<FeedbackHistoryResponse> {
1126        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
1127        let response = self.client.get(&url).send().await?;
1128        self.handle_response(response).await
1129    }
1130
1131    /// Get aggregate feedback counts and health score for an agent (INT-1).
1132    pub async fn get_agent_feedback_summary(&self, agent_id: &str) -> Result<AgentFeedbackSummary> {
1133        let url = format!("{}/v1/agents/{}/feedback/summary", self.base_url, agent_id);
1134        let response = self.client.get(&url).send().await?;
1135        self.handle_response(response).await
1136    }
1137
1138    /// Directly override a memory's importance score (INT-1).
1139    ///
1140    /// # Arguments
1141    /// * `memory_id` – The memory to update.
1142    /// * `agent_id` – The agent that owns the memory.
1143    /// * `importance` – New importance value (0.0–1.0).
1144    pub async fn patch_memory_importance(
1145        &self,
1146        memory_id: &str,
1147        agent_id: &str,
1148        importance: f32,
1149    ) -> Result<FeedbackResponse> {
1150        let url = format!("{}/v1/memories/{}/importance", self.base_url, memory_id);
1151        let body = MemoryImportancePatch {
1152            agent_id: agent_id.to_string(),
1153            importance,
1154        };
1155        let response = self.client.patch(&url).json(&body).send().await?;
1156        self.handle_response(response).await
1157    }
1158
1159    /// Get overall feedback health score for an agent (INT-1).
1160    ///
1161    /// The health score is the mean importance of all non-expired memories (0.0–1.0).
1162    /// A higher score indicates a healthier, more relevant memory store.
1163    pub async fn get_feedback_health(&self, agent_id: &str) -> Result<FeedbackHealthResponse> {
1164        let url = format!("{}/v1/feedback/health?agent_id={}", self.base_url, agent_id);
1165        let response = self.client.get(&url).send().await?;
1166        self.handle_response(response).await
1167    }
1168
1169    // ========================================================================
1170    // Memory Knowledge Graph Operations (CE-5 / SDK-9)
1171    // ========================================================================
1172
1173    /// Traverse the knowledge graph from a memory node.
1174    ///
1175    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1176    ///
1177    /// # Arguments
1178    /// * `memory_id` – Root memory ID to start traversal from.
1179    /// * `options` – Traversal options (depth, edge type filters).
1180    ///
1181    /// # Example
1182    /// ```no_run
1183    /// # use dakera_client::{DakeraClient, GraphOptions};
1184    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1185    /// let graph = client.memory_graph("mem-abc", GraphOptions::new().depth(2)).await?;
1186    /// println!("{} nodes, {} edges", graph.nodes.len(), graph.edges.len());
1187    /// # Ok(()) }
1188    /// ```
1189    pub async fn memory_graph(
1190        &self,
1191        memory_id: &str,
1192        options: GraphOptions,
1193    ) -> Result<MemoryGraph> {
1194        let mut url = format!("{}/v1/memories/{}/graph", self.base_url, memory_id);
1195        let depth = options.depth.unwrap_or(1);
1196        url.push_str(&format!("?depth={}", depth));
1197        if let Some(types) = &options.types {
1198            let type_strs: Vec<String> = types
1199                .iter()
1200                .map(|t| {
1201                    serde_json::to_value(t)
1202                        .unwrap()
1203                        .as_str()
1204                        .unwrap_or("")
1205                        .to_string()
1206                })
1207                .collect();
1208            if !type_strs.is_empty() {
1209                url.push_str(&format!("&types={}", type_strs.join(",")));
1210            }
1211        }
1212        let response = self.client.get(&url).send().await?;
1213        self.handle_response(response).await
1214    }
1215
1216    /// Find the shortest path between two memories in the knowledge graph.
1217    ///
1218    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1219    ///
1220    /// # Example
1221    /// ```no_run
1222    /// # use dakera_client::DakeraClient;
1223    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1224    /// let path = client.memory_path("mem-abc", "mem-xyz").await?;
1225    /// println!("{} hops: {:?}", path.hops, path.path);
1226    /// # Ok(()) }
1227    /// ```
1228    pub async fn memory_path(&self, source_id: &str, target_id: &str) -> Result<GraphPath> {
1229        let url = format!(
1230            "{}/v1/memories/{}/path?target={}",
1231            self.base_url,
1232            source_id,
1233            urlencoding::encode(target_id)
1234        );
1235        let response = self.client.get(&url).send().await?;
1236        self.handle_response(response).await
1237    }
1238
1239    /// Create an explicit edge between two memories.
1240    ///
1241    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1242    ///
1243    /// # Example
1244    /// ```no_run
1245    /// # use dakera_client::{DakeraClient, EdgeType};
1246    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
1247    /// let resp = client.memory_link("mem-abc", "mem-xyz", EdgeType::LinkedBy).await?;
1248    /// println!("Created edge: {}", resp.edge.id);
1249    /// # Ok(()) }
1250    /// ```
1251    pub async fn memory_link(
1252        &self,
1253        source_id: &str,
1254        target_id: &str,
1255        edge_type: EdgeType,
1256    ) -> Result<GraphLinkResponse> {
1257        let url = format!("{}/v1/memories/{}/links", self.base_url, source_id);
1258        let request = GraphLinkRequest {
1259            target_id: target_id.to_string(),
1260            edge_type,
1261        };
1262        let response = self.client.post(&url).json(&request).send().await?;
1263        self.handle_response(response).await
1264    }
1265
1266    /// Export the full knowledge graph for an agent.
1267    ///
1268    /// Requires CE-5 (Memory Knowledge Graph) on the server.
1269    ///
1270    /// # Arguments
1271    /// * `agent_id` – Agent whose graph to export.
1272    /// * `format` – Export format: `"json"` (default), `"graphml"`, or `"csv"`.
1273    pub async fn agent_graph_export(&self, agent_id: &str, format: &str) -> Result<GraphExport> {
1274        let url = format!(
1275            "{}/v1/agents/{}/graph/export?format={}",
1276            self.base_url, agent_id, format
1277        );
1278        let response = self.client.get(&url).send().await?;
1279        self.handle_response(response).await
1280    }
1281
1282    // ========================================================================
1283    // Session Operations
1284    // ========================================================================
1285
1286    /// Start a new session for an agent
1287    pub async fn start_session(&self, agent_id: &str) -> Result<Session> {
1288        let url = format!("{}/v1/sessions/start", self.base_url);
1289        let request = SessionStartRequest {
1290            agent_id: agent_id.to_string(),
1291            metadata: None,
1292        };
1293        let response = self.client.post(&url).json(&request).send().await?;
1294        let resp: SessionStartResponse = self.handle_response(response).await?;
1295        Ok(resp.session)
1296    }
1297
1298    /// Start a session with metadata
1299    pub async fn start_session_with_metadata(
1300        &self,
1301        agent_id: &str,
1302        metadata: serde_json::Value,
1303    ) -> Result<Session> {
1304        let url = format!("{}/v1/sessions/start", self.base_url);
1305        let request = SessionStartRequest {
1306            agent_id: agent_id.to_string(),
1307            metadata: Some(metadata),
1308        };
1309        let response = self.client.post(&url).json(&request).send().await?;
1310        let resp: SessionStartResponse = self.handle_response(response).await?;
1311        Ok(resp.session)
1312    }
1313
1314    /// End a session, optionally with a summary.
1315    /// Returns the session state and the total memory count at close.
1316    pub async fn end_session(
1317        &self,
1318        session_id: &str,
1319        summary: Option<String>,
1320    ) -> Result<SessionEndResponse> {
1321        let url = format!("{}/v1/sessions/{}/end", self.base_url, session_id);
1322        let request = SessionEndRequest { summary };
1323        let response = self.client.post(&url).json(&request).send().await?;
1324        self.handle_response(response).await
1325    }
1326
1327    /// Get a session by ID
1328    pub async fn get_session(&self, session_id: &str) -> Result<Session> {
1329        let url = format!("{}/v1/sessions/{}", self.base_url, session_id);
1330        let response = self.client.get(&url).send().await?;
1331        self.handle_response(response).await
1332    }
1333
1334    /// List sessions for an agent
1335    pub async fn list_sessions(&self, agent_id: &str) -> Result<Vec<Session>> {
1336        let url = format!("{}/v1/sessions?agent_id={}", self.base_url, agent_id);
1337        let response = self.client.get(&url).send().await?;
1338        self.handle_response(response).await
1339    }
1340
1341    /// Get memories in a session
1342    pub async fn session_memories(&self, session_id: &str) -> Result<RecallResponse> {
1343        let url = format!("{}/v1/sessions/{}/memories", self.base_url, session_id);
1344        let response = self.client.get(&url).send().await?;
1345        self.handle_response(response).await
1346    }
1347
1348    // ========================================================================
1349    // CE-2: Batch Recall / Forget
1350    // ========================================================================
1351
1352    /// Bulk-recall memories using filter predicates (CE-2).
1353    ///
1354    /// Uses `POST /v1/memories/recall/batch` — no embedding required.
1355    ///
1356    /// # Example
1357    ///
1358    /// ```rust,no_run
1359    /// use dakera_client::{DakeraClient, memory::{BatchRecallRequest, BatchMemoryFilter}};
1360    ///
1361    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1362    /// let client = DakeraClient::new("http://localhost:3000")?;
1363    ///
1364    /// let filter = BatchMemoryFilter::default().with_min_importance(0.7);
1365    /// let req = BatchRecallRequest::new("agent-1").with_filter(filter).with_limit(50);
1366    /// let resp = client.batch_recall(req).await?;
1367    /// println!("Found {} memories", resp.filtered);
1368    /// # Ok(())
1369    /// # }
1370    /// ```
1371    pub async fn batch_recall(&self, request: BatchRecallRequest) -> Result<BatchRecallResponse> {
1372        let url = format!("{}/v1/memories/recall/batch", self.base_url);
1373        let response = self.client.post(&url).json(&request).send().await?;
1374        self.handle_response(response).await
1375    }
1376
1377    /// Bulk-delete memories using filter predicates (CE-2).
1378    ///
1379    /// Uses `DELETE /v1/memories/forget/batch`.  The server requires at least
1380    /// one filter predicate to be set as a safety guard.
1381    ///
1382    /// # Example
1383    ///
1384    /// ```rust,no_run
1385    /// use dakera_client::{DakeraClient, memory::{BatchForgetRequest, BatchMemoryFilter}};
1386    ///
1387    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1388    /// let client = DakeraClient::new("http://localhost:3000")?;
1389    ///
1390    /// let filter = BatchMemoryFilter::default().with_min_importance(0.0).with_max_importance(0.2);
1391    /// let resp = client.batch_forget(BatchForgetRequest::new("agent-1", filter)).await?;
1392    /// println!("Deleted {} memories", resp.deleted_count);
1393    /// # Ok(())
1394    /// # }
1395    /// ```
1396    pub async fn batch_forget(&self, request: BatchForgetRequest) -> Result<BatchForgetResponse> {
1397        let url = format!("{}/v1/memories/forget/batch", self.base_url);
1398        let response = self.client.delete(&url).json(&request).send().await?;
1399        self.handle_response(response).await
1400    }
1401
1402    // ========================================================================
1403    // DX-1: Memory Import / Export
1404    // ========================================================================
1405
1406    /// Import memories from an external format (DX-1).
1407    ///
1408    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1409    ///
1410    /// ```no_run
1411    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1412    /// let client = dakera_client::DakeraClient::new("http://localhost:3000")?;
1413    /// let data = serde_json::json!([{"content": "hello", "agent_id": "agent-1"}]);
1414    /// let resp = client.import_memories(data, "jsonl", None, None).await?;
1415    /// println!("Imported {} memories", resp.imported_count);
1416    /// # Ok(())
1417    /// # }
1418    /// ```
1419    pub async fn import_memories(
1420        &self,
1421        data: serde_json::Value,
1422        format: &str,
1423        agent_id: Option<&str>,
1424        namespace: Option<&str>,
1425    ) -> Result<MemoryImportResponse> {
1426        let mut body = serde_json::json!({"data": data, "format": format});
1427        if let Some(aid) = agent_id {
1428            body["agent_id"] = serde_json::Value::String(aid.to_string());
1429        }
1430        if let Some(ns) = namespace {
1431            body["namespace"] = serde_json::Value::String(ns.to_string());
1432        }
1433        let url = format!("{}/v1/import", self.base_url);
1434        let response = self.client.post(&url).json(&body).send().await?;
1435        self.handle_response(response).await
1436    }
1437
1438    /// Export memories in a portable format (DX-1).
1439    ///
1440    /// Supported formats: `"jsonl"`, `"mem0"`, `"zep"`, `"csv"`.
1441    pub async fn export_memories(
1442        &self,
1443        format: &str,
1444        agent_id: Option<&str>,
1445        namespace: Option<&str>,
1446        limit: Option<u32>,
1447    ) -> Result<MemoryExportResponse> {
1448        let mut params = vec![("format", format.to_string())];
1449        if let Some(aid) = agent_id {
1450            params.push(("agent_id", aid.to_string()));
1451        }
1452        if let Some(ns) = namespace {
1453            params.push(("namespace", ns.to_string()));
1454        }
1455        if let Some(l) = limit {
1456            params.push(("limit", l.to_string()));
1457        }
1458        let url = format!("{}/v1/export", self.base_url);
1459        let response = self.client.get(&url).query(&params).send().await?;
1460        self.handle_response(response).await
1461    }
1462
1463    // ========================================================================
1464    // OBS-1: Business-Event Audit Log
1465    // ========================================================================
1466
1467    /// List paginated audit log entries (OBS-1).
1468    pub async fn list_audit_events(&self, query: AuditQuery) -> Result<AuditListResponse> {
1469        let url = format!("{}/v1/audit", self.base_url);
1470        let response = self.client.get(&url).query(&query).send().await?;
1471        self.handle_response(response).await
1472    }
1473
1474    /// Stream live audit events via SSE (OBS-1).
1475    ///
1476    /// Returns a [`tokio::sync::mpsc::Receiver`] that yields [`DakeraEvent`] results.
1477    pub async fn stream_audit_events(
1478        &self,
1479        agent_id: Option<&str>,
1480        event_type: Option<&str>,
1481    ) -> Result<tokio::sync::mpsc::Receiver<Result<crate::events::DakeraEvent>>> {
1482        let mut params: Vec<(&str, String)> = Vec::new();
1483        if let Some(aid) = agent_id {
1484            params.push(("agent_id", aid.to_string()));
1485        }
1486        if let Some(et) = event_type {
1487            params.push(("event_type", et.to_string()));
1488        }
1489        let base = format!("{}/v1/audit/stream", self.base_url);
1490        let url = if params.is_empty() {
1491            base
1492        } else {
1493            let qs = params
1494                .iter()
1495                .map(|(k, v)| format!("{}={}", k, urlencoding::encode(v)))
1496                .collect::<Vec<_>>()
1497                .join("&");
1498            format!("{}?{}", base, qs)
1499        };
1500        self.stream_sse(url).await
1501    }
1502
1503    /// Bulk-export audit log entries (OBS-1).
1504    pub async fn export_audit(
1505        &self,
1506        format: &str,
1507        agent_id: Option<&str>,
1508        event_type: Option<&str>,
1509        from_ts: Option<u64>,
1510        to_ts: Option<u64>,
1511    ) -> Result<AuditExportResponse> {
1512        let mut body = serde_json::json!({"format": format});
1513        if let Some(aid) = agent_id {
1514            body["agent_id"] = serde_json::Value::String(aid.to_string());
1515        }
1516        if let Some(et) = event_type {
1517            body["event_type"] = serde_json::Value::String(et.to_string());
1518        }
1519        if let Some(f) = from_ts {
1520            body["from"] = serde_json::Value::Number(f.into());
1521        }
1522        if let Some(t) = to_ts {
1523            body["to"] = serde_json::Value::Number(t.into());
1524        }
1525        let url = format!("{}/v1/audit/export", self.base_url);
1526        let response = self.client.post(&url).json(&body).send().await?;
1527        self.handle_response(response).await
1528    }
1529
1530    // ========================================================================
1531    // EXT-1: External Extraction Providers
1532    // ========================================================================
1533
1534    /// Extract entities from text using a pluggable provider (EXT-1).
1535    ///
1536    /// Provider hierarchy: per-request > namespace default > GLiNER (bundled).
1537    /// Supported providers: `"gliner"`, `"openai"`, `"anthropic"`, `"openrouter"`, `"ollama"`.
1538    pub async fn extract_text(
1539        &self,
1540        text: &str,
1541        namespace: Option<&str>,
1542        provider: Option<&str>,
1543        model: Option<&str>,
1544    ) -> Result<ExtractionResult> {
1545        let mut body = serde_json::json!({"text": text});
1546        if let Some(ns) = namespace {
1547            body["namespace"] = serde_json::Value::String(ns.to_string());
1548        }
1549        if let Some(p) = provider {
1550            body["provider"] = serde_json::Value::String(p.to_string());
1551        }
1552        if let Some(m) = model {
1553            body["model"] = serde_json::Value::String(m.to_string());
1554        }
1555        let url = format!("{}/v1/extract", self.base_url);
1556        let response = self.client.post(&url).json(&body).send().await?;
1557        self.handle_response(response).await
1558    }
1559
1560    /// List available extraction providers and their models (EXT-1).
1561    pub async fn list_extract_providers(&self) -> Result<Vec<ExtractionProviderInfo>> {
1562        let url = format!("{}/v1/extract/providers", self.base_url);
1563        let response = self.client.get(&url).send().await?;
1564        let result: ExtractProvidersResponse = self.handle_response(response).await?;
1565        Ok(match result {
1566            ExtractProvidersResponse::List(v) => v,
1567            ExtractProvidersResponse::Object { providers } => providers,
1568        })
1569    }
1570
1571    /// Set the default extraction provider for a namespace (EXT-1).
1572    pub async fn configure_namespace_extractor(
1573        &self,
1574        namespace: &str,
1575        provider: &str,
1576        model: Option<&str>,
1577    ) -> Result<serde_json::Value> {
1578        let mut body = serde_json::json!({"provider": provider});
1579        if let Some(m) = model {
1580            body["model"] = serde_json::Value::String(m.to_string());
1581        }
1582        let url = format!(
1583            "{}/v1/namespaces/{}/extractor",
1584            self.base_url,
1585            urlencoding::encode(namespace)
1586        );
1587        let response = self.client.patch(&url).json(&body).send().await?;
1588        self.handle_response(response).await
1589    }
1590
1591    // =========================================================================
1592    // SEC-3: AES-256-GCM Encryption Key Rotation
1593    // =========================================================================
1594
1595    /// Re-encrypt all memory content blobs with a new AES-256-GCM key (SEC-3).
1596    ///
1597    /// After this call the new key is active in the running process.
1598    /// The operator must update `DAKERA_ENCRYPTION_KEY` and restart to make
1599    /// the rotation durable across restarts.
1600    ///
1601    /// Requires Admin scope.
1602    ///
1603    /// # Arguments
1604    /// * `new_key` - New passphrase or 64-char hex key.
1605    /// * `namespace` - If `Some`, rotate only this namespace. `None` rotates all.
1606    pub async fn rotate_encryption_key(
1607        &self,
1608        new_key: &str,
1609        namespace: Option<&str>,
1610    ) -> Result<RotateEncryptionKeyResponse> {
1611        let body = RotateEncryptionKeyRequest {
1612            new_key: new_key.to_string(),
1613            namespace: namespace.map(|s| s.to_string()),
1614        };
1615        let url = format!("{}/v1/admin/encryption/rotate-key", self.base_url);
1616        let response = self.client.post(&url).json(&body).send().await?;
1617        self.handle_response(response).await
1618    }
1619}