// dakera_client/memory.rs

//! Memory-oriented client methods for Dakera AI Agent Memory Platform
//!
//! Provides high-level methods for storing, recalling, and managing
//! agent memories and sessions through the Dakera API.

use serde::{Deserialize, Serialize};

use crate::error::Result;
use crate::types::{
    AgentFeedbackSummary, EdgeType, FeedbackHealthResponse, FeedbackHistoryResponse,
    FeedbackResponse, FeedbackSignal, GraphExport, GraphLinkRequest, GraphLinkResponse,
    GraphOptions, GraphPath, MemoryFeedbackBody, MemoryGraph, MemoryImportancePatch,
};
use crate::DakeraClient;

// ============================================================================
// Memory Types (client-side)
// ============================================================================

20/// Memory type classification
21#[derive(Debug, Clone, Serialize, Deserialize, Default)]
22pub enum MemoryType {
23    #[default]
24    Episodic,
25    Semantic,
26    Procedural,
27    Working,
28}
29
30/// Store a memory request
31#[derive(Debug, Clone, Serialize, Deserialize)]
32pub struct StoreMemoryRequest {
33    pub agent_id: String,
34    pub content: String,
35    #[serde(default)]
36    pub memory_type: MemoryType,
37    #[serde(default = "default_importance")]
38    pub importance: f32,
39    #[serde(default)]
40    pub tags: Vec<String>,
41    #[serde(skip_serializing_if = "Option::is_none")]
42    pub session_id: Option<String>,
43    #[serde(skip_serializing_if = "Option::is_none")]
44    pub metadata: Option<serde_json::Value>,
45    /// Optional TTL in seconds. The memory is hard-deleted after this many
46    /// seconds from creation.
47    #[serde(skip_serializing_if = "Option::is_none")]
48    pub ttl_seconds: Option<u64>,
49    /// Optional explicit expiry as a Unix timestamp (seconds). Takes precedence
50    /// over `ttl_seconds` when both are set. The memory is hard-deleted by the
51    /// decay engine on expiry (DECAY-3).
52    #[serde(skip_serializing_if = "Option::is_none")]
53    pub expires_at: Option<u64>,
54}
55
/// Serde default for [`StoreMemoryRequest::importance`]: a neutral 0.5.
fn default_importance() -> f32 {
    0.5
}

60impl StoreMemoryRequest {
61    /// Create a new store memory request
62    pub fn new(agent_id: impl Into<String>, content: impl Into<String>) -> Self {
63        Self {
64            agent_id: agent_id.into(),
65            content: content.into(),
66            memory_type: MemoryType::default(),
67            importance: 0.5,
68            tags: Vec::new(),
69            session_id: None,
70            metadata: None,
71            ttl_seconds: None,
72            expires_at: None,
73        }
74    }
75
76    /// Set memory type
77    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
78        self.memory_type = memory_type;
79        self
80    }
81
82    /// Set importance score
83    pub fn with_importance(mut self, importance: f32) -> Self {
84        self.importance = importance.clamp(0.0, 1.0);
85        self
86    }
87
88    /// Set tags
89    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
90        self.tags = tags;
91        self
92    }
93
94    /// Set session ID
95    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
96        self.session_id = Some(session_id.into());
97        self
98    }
99
100    /// Set metadata
101    pub fn with_metadata(mut self, metadata: serde_json::Value) -> Self {
102        self.metadata = Some(metadata);
103        self
104    }
105
106    /// Set TTL in seconds. The memory is hard-deleted after this many seconds
107    /// from creation.
108    pub fn with_ttl(mut self, ttl_seconds: u64) -> Self {
109        self.ttl_seconds = Some(ttl_seconds);
110        self
111    }
112
113    /// Set an explicit expiry Unix timestamp (seconds). Takes precedence over
114    /// `ttl_seconds` when both are set (DECAY-3).
115    pub fn with_expires_at(mut self, expires_at: u64) -> Self {
116        self.expires_at = Some(expires_at);
117        self
118    }
119}
120
121/// Stored memory response
122#[derive(Debug, Clone, Serialize, Deserialize)]
123pub struct StoreMemoryResponse {
124    pub memory_id: String,
125    pub agent_id: String,
126    pub namespace: String,
127}
128
129/// Recall memories request
130#[derive(Debug, Clone, Serialize, Deserialize)]
131pub struct RecallRequest {
132    pub agent_id: String,
133    pub query: String,
134    #[serde(default = "default_top_k")]
135    pub top_k: usize,
136    #[serde(skip_serializing_if = "Option::is_none")]
137    pub memory_type: Option<MemoryType>,
138    #[serde(default)]
139    pub min_importance: f32,
140    #[serde(skip_serializing_if = "Option::is_none")]
141    pub session_id: Option<String>,
142    #[serde(default)]
143    pub tags: Vec<String>,
144}
145
/// Serde default for [`RecallRequest::top_k`].
fn default_top_k() -> usize {
    5
}

150impl RecallRequest {
151    /// Create a new recall request
152    pub fn new(agent_id: impl Into<String>, query: impl Into<String>) -> Self {
153        Self {
154            agent_id: agent_id.into(),
155            query: query.into(),
156            top_k: 5,
157            memory_type: None,
158            min_importance: 0.0,
159            session_id: None,
160            tags: Vec::new(),
161        }
162    }
163
164    /// Set number of results
165    pub fn with_top_k(mut self, top_k: usize) -> Self {
166        self.top_k = top_k;
167        self
168    }
169
170    /// Filter by memory type
171    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
172        self.memory_type = Some(memory_type);
173        self
174    }
175
176    /// Set minimum importance threshold
177    pub fn with_min_importance(mut self, min: f32) -> Self {
178        self.min_importance = min;
179        self
180    }
181
182    /// Filter by session
183    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
184        self.session_id = Some(session_id.into());
185        self
186    }
187
188    /// Filter by tags
189    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
190        self.tags = tags;
191        self
192    }
193}
194
195/// A recalled memory
196#[derive(Debug, Clone, Serialize, Deserialize)]
197pub struct RecalledMemory {
198    pub id: String,
199    pub content: String,
200    pub memory_type: MemoryType,
201    pub importance: f32,
202    pub score: f32,
203    #[serde(default)]
204    pub tags: Vec<String>,
205    #[serde(skip_serializing_if = "Option::is_none")]
206    pub session_id: Option<String>,
207    #[serde(skip_serializing_if = "Option::is_none")]
208    pub metadata: Option<serde_json::Value>,
209    pub created_at: u64,
210    pub last_accessed_at: u64,
211    pub access_count: u32,
212}
213
214/// Recall response
215#[derive(Debug, Clone, Serialize, Deserialize)]
216pub struct RecallResponse {
217    pub memories: Vec<RecalledMemory>,
218    pub total_found: usize,
219}
220
221/// Forget (delete) memories request
222#[derive(Debug, Clone, Serialize, Deserialize)]
223pub struct ForgetRequest {
224    pub agent_id: String,
225    #[serde(default)]
226    pub memory_ids: Vec<String>,
227    #[serde(default)]
228    pub tags: Vec<String>,
229    #[serde(skip_serializing_if = "Option::is_none")]
230    pub session_id: Option<String>,
231    #[serde(skip_serializing_if = "Option::is_none")]
232    pub before_timestamp: Option<u64>,
233}
234
235impl ForgetRequest {
236    /// Forget specific memories by ID
237    pub fn by_ids(agent_id: impl Into<String>, ids: Vec<String>) -> Self {
238        Self {
239            agent_id: agent_id.into(),
240            memory_ids: ids,
241            tags: Vec::new(),
242            session_id: None,
243            before_timestamp: None,
244        }
245    }
246
247    /// Forget memories with specific tags
248    pub fn by_tags(agent_id: impl Into<String>, tags: Vec<String>) -> Self {
249        Self {
250            agent_id: agent_id.into(),
251            memory_ids: Vec::new(),
252            tags,
253            session_id: None,
254            before_timestamp: None,
255        }
256    }
257
258    /// Forget all memories in a session
259    pub fn by_session(agent_id: impl Into<String>, session_id: impl Into<String>) -> Self {
260        Self {
261            agent_id: agent_id.into(),
262            memory_ids: Vec::new(),
263            tags: Vec::new(),
264            session_id: Some(session_id.into()),
265            before_timestamp: None,
266        }
267    }
268}
269
270/// Forget response
271#[derive(Debug, Clone, Serialize, Deserialize)]
272pub struct ForgetResponse {
273    pub deleted_count: u64,
274}
275
276/// Session start request
277#[derive(Debug, Clone, Serialize, Deserialize)]
278pub struct SessionStartRequest {
279    pub agent_id: String,
280    #[serde(skip_serializing_if = "Option::is_none")]
281    pub metadata: Option<serde_json::Value>,
282}
283
284/// Session information
285#[derive(Debug, Clone, Serialize, Deserialize)]
286pub struct Session {
287    pub id: String,
288    pub agent_id: String,
289    pub started_at: u64,
290    #[serde(skip_serializing_if = "Option::is_none")]
291    pub ended_at: Option<u64>,
292    #[serde(skip_serializing_if = "Option::is_none")]
293    pub summary: Option<String>,
294    #[serde(skip_serializing_if = "Option::is_none")]
295    pub metadata: Option<serde_json::Value>,
296}
297
298/// Session end request
299#[derive(Debug, Clone, Serialize, Deserialize)]
300pub struct SessionEndRequest {
301    #[serde(skip_serializing_if = "Option::is_none")]
302    pub summary: Option<String>,
303}
304
305/// Request to update a memory
306#[derive(Debug, Clone, Serialize, Deserialize)]
307pub struct UpdateMemoryRequest {
308    #[serde(skip_serializing_if = "Option::is_none")]
309    pub content: Option<String>,
310    #[serde(skip_serializing_if = "Option::is_none")]
311    pub metadata: Option<serde_json::Value>,
312    #[serde(skip_serializing_if = "Option::is_none")]
313    pub memory_type: Option<MemoryType>,
314}
315
316/// Request to update memory importance
317#[derive(Debug, Clone, Serialize, Deserialize)]
318pub struct UpdateImportanceRequest {
319    pub memory_ids: Vec<String>,
320    pub importance: f32,
321}
322
323/// Request to consolidate memories
324#[derive(Debug, Clone, Serialize, Deserialize)]
325pub struct ConsolidateRequest {
326    #[serde(skip_serializing_if = "Option::is_none")]
327    pub memory_type: Option<String>,
328    #[serde(skip_serializing_if = "Option::is_none")]
329    pub threshold: Option<f32>,
330    #[serde(default)]
331    pub dry_run: bool,
332}
333
334/// Response from consolidation
335#[derive(Debug, Clone, Serialize, Deserialize)]
336pub struct ConsolidateResponse {
337    pub consolidated_count: usize,
338    pub removed_count: usize,
339    pub new_memories: Vec<String>,
340}
341
342/// Request for memory feedback
343#[derive(Debug, Clone, Serialize, Deserialize)]
344pub struct FeedbackRequest {
345    pub memory_id: String,
346    pub feedback: String,
347    #[serde(skip_serializing_if = "Option::is_none")]
348    pub relevance_score: Option<f32>,
349}
350
351/// Response from legacy feedback endpoint (POST /v1/agents/:id/memories/feedback)
352#[derive(Debug, Clone, Serialize, Deserialize)]
353pub struct LegacyFeedbackResponse {
354    pub status: String,
355    pub updated_importance: Option<f32>,
356}
357
// ============================================================================
// CE-2: Batch Recall / Forget Types
// ============================================================================

362/// Filter predicates for batch memory operations (CE-2).
363///
364/// All fields are optional.  For [`BatchForgetRequest`] at least one must be
365/// set (server-side safety guard).
366#[derive(Debug, Clone, Serialize, Deserialize, Default)]
367pub struct BatchMemoryFilter {
368    /// Restrict to memories that carry **all** listed tags.
369    #[serde(skip_serializing_if = "Option::is_none")]
370    pub tags: Option<Vec<String>>,
371    /// Minimum importance (inclusive).
372    #[serde(skip_serializing_if = "Option::is_none")]
373    pub min_importance: Option<f32>,
374    /// Maximum importance (inclusive).
375    #[serde(skip_serializing_if = "Option::is_none")]
376    pub max_importance: Option<f32>,
377    /// Only memories created at or after this Unix timestamp (seconds).
378    #[serde(skip_serializing_if = "Option::is_none")]
379    pub created_after: Option<u64>,
380    /// Only memories created before or at this Unix timestamp (seconds).
381    #[serde(skip_serializing_if = "Option::is_none")]
382    pub created_before: Option<u64>,
383    /// Restrict to a specific memory type.
384    #[serde(skip_serializing_if = "Option::is_none")]
385    pub memory_type: Option<MemoryType>,
386    /// Restrict to memories from a specific session.
387    #[serde(skip_serializing_if = "Option::is_none")]
388    pub session_id: Option<String>,
389}
390
391impl BatchMemoryFilter {
392    /// Convenience: filter by tags.
393    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
394        self.tags = Some(tags);
395        self
396    }
397
398    /// Convenience: filter by minimum importance.
399    pub fn with_min_importance(mut self, min: f32) -> Self {
400        self.min_importance = Some(min);
401        self
402    }
403
404    /// Convenience: filter by maximum importance.
405    pub fn with_max_importance(mut self, max: f32) -> Self {
406        self.max_importance = Some(max);
407        self
408    }
409
410    /// Convenience: filter by session.
411    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
412        self.session_id = Some(session_id.into());
413        self
414    }
415}
416
417/// Request body for `POST /v1/memories/recall/batch`.
418#[derive(Debug, Clone, Serialize, Deserialize)]
419pub struct BatchRecallRequest {
420    /// Agent whose memory namespace to search.
421    pub agent_id: String,
422    /// Filter predicates to apply.
423    #[serde(default)]
424    pub filter: BatchMemoryFilter,
425    /// Maximum number of results to return (default: 100).
426    #[serde(default = "default_batch_limit")]
427    pub limit: usize,
428}
429
/// Serde default for [`BatchRecallRequest::limit`].
fn default_batch_limit() -> usize {
    100
}

434impl BatchRecallRequest {
435    /// Create a new batch recall request for an agent.
436    pub fn new(agent_id: impl Into<String>) -> Self {
437        Self {
438            agent_id: agent_id.into(),
439            filter: BatchMemoryFilter::default(),
440            limit: 100,
441        }
442    }
443
444    /// Set filter predicates.
445    pub fn with_filter(mut self, filter: BatchMemoryFilter) -> Self {
446        self.filter = filter;
447        self
448    }
449
450    /// Set result limit.
451    pub fn with_limit(mut self, limit: usize) -> Self {
452        self.limit = limit;
453        self
454    }
455}
456
457/// Response from `POST /v1/memories/recall/batch`.
458#[derive(Debug, Clone, Serialize, Deserialize)]
459pub struct BatchRecallResponse {
460    pub memories: Vec<RecalledMemory>,
461    /// Total memories in the agent namespace.
462    pub total: usize,
463    /// Number of memories that passed the filter.
464    pub filtered: usize,
465}
466
467/// Request body for `DELETE /v1/memories/forget/batch`.
468#[derive(Debug, Clone, Serialize, Deserialize)]
469pub struct BatchForgetRequest {
470    /// Agent whose memory namespace to purge from.
471    pub agent_id: String,
472    /// Filter predicates — **at least one must be set** (server safety guard).
473    pub filter: BatchMemoryFilter,
474}
475
476impl BatchForgetRequest {
477    /// Create a new batch forget request with the given filter.
478    pub fn new(agent_id: impl Into<String>, filter: BatchMemoryFilter) -> Self {
479        Self {
480            agent_id: agent_id.into(),
481            filter,
482        }
483    }
484}
485
486/// Response from `DELETE /v1/memories/forget/batch`.
487#[derive(Debug, Clone, Serialize, Deserialize)]
488pub struct BatchForgetResponse {
489    pub deleted_count: usize,
490}
491
// ============================================================================
// Memory Client Methods
// ============================================================================

496impl DakeraClient {
497    // ========================================================================
498    // Memory Operations
499    // ========================================================================
500
501    /// Store a memory for an agent
502    ///
503    /// # Example
504    ///
505    /// ```rust,no_run
506    /// use dakera_client::{DakeraClient, memory::StoreMemoryRequest};
507    ///
508    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
509    /// let client = DakeraClient::new("http://localhost:3000")?;
510    ///
511    /// let request = StoreMemoryRequest::new("agent-1", "The user prefers dark mode")
512    ///     .with_importance(0.8)
513    ///     .with_tags(vec!["preferences".to_string()]);
514    ///
515    /// let response = client.store_memory(request).await?;
516    /// println!("Stored memory: {}", response.memory_id);
517    /// # Ok(())
518    /// # }
519    /// ```
520    pub async fn store_memory(&self, request: StoreMemoryRequest) -> Result<StoreMemoryResponse> {
521        let url = format!("{}/v1/memory/store", self.base_url);
522        let response = self.client.post(&url).json(&request).send().await?;
523        self.handle_response(response).await
524    }
525
526    /// Recall memories by semantic query
527    ///
528    /// # Example
529    ///
530    /// ```rust,no_run
531    /// use dakera_client::{DakeraClient, memory::RecallRequest};
532    ///
533    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
534    /// let client = DakeraClient::new("http://localhost:3000")?;
535    ///
536    /// let request = RecallRequest::new("agent-1", "user preferences")
537    ///     .with_top_k(10);
538    ///
539    /// let response = client.recall(request).await?;
540    /// for memory in response.memories {
541    ///     println!("{}: {} (score: {})", memory.id, memory.content, memory.score);
542    /// }
543    /// # Ok(())
544    /// # }
545    /// ```
546    pub async fn recall(&self, request: RecallRequest) -> Result<RecallResponse> {
547        let url = format!("{}/v1/memory/recall", self.base_url);
548        let response = self.client.post(&url).json(&request).send().await?;
549        self.handle_response(response).await
550    }
551
552    /// Simple recall with just agent_id and query (convenience method)
553    pub async fn recall_simple(
554        &self,
555        agent_id: &str,
556        query: &str,
557        top_k: usize,
558    ) -> Result<RecallResponse> {
559        self.recall(RecallRequest::new(agent_id, query).with_top_k(top_k))
560            .await
561    }
562
563    /// Get a specific memory by ID
564    pub async fn get_memory(&self, memory_id: &str) -> Result<RecalledMemory> {
565        let url = format!("{}/v1/memory/get/{}", self.base_url, memory_id);
566        let response = self.client.get(&url).send().await?;
567        self.handle_response(response).await
568    }
569
570    /// Forget (delete) memories
571    pub async fn forget(&self, request: ForgetRequest) -> Result<ForgetResponse> {
572        let url = format!("{}/v1/memory/forget", self.base_url);
573        let response = self.client.post(&url).json(&request).send().await?;
574        self.handle_response(response).await
575    }
576
577    /// Search memories with advanced filters
578    pub async fn search_memories(&self, request: RecallRequest) -> Result<RecallResponse> {
579        let url = format!("{}/v1/memory/search", self.base_url);
580        let response = self.client.post(&url).json(&request).send().await?;
581        self.handle_response(response).await
582    }
583
584    /// Update an existing memory
585    pub async fn update_memory(
586        &self,
587        agent_id: &str,
588        memory_id: &str,
589        request: UpdateMemoryRequest,
590    ) -> Result<StoreMemoryResponse> {
591        let url = format!(
592            "{}/v1/agents/{}/memories/{}",
593            self.base_url, agent_id, memory_id
594        );
595        let response = self.client.put(&url).json(&request).send().await?;
596        self.handle_response(response).await
597    }
598
599    /// Update importance of memories
600    pub async fn update_importance(
601        &self,
602        agent_id: &str,
603        request: UpdateImportanceRequest,
604    ) -> Result<serde_json::Value> {
605        let url = format!(
606            "{}/v1/agents/{}/memories/importance",
607            self.base_url, agent_id
608        );
609        let response = self.client.put(&url).json(&request).send().await?;
610        self.handle_response(response).await
611    }
612
613    /// Consolidate memories for an agent
614    pub async fn consolidate(
615        &self,
616        agent_id: &str,
617        request: ConsolidateRequest,
618    ) -> Result<ConsolidateResponse> {
619        let url = format!(
620            "{}/v1/agents/{}/memories/consolidate",
621            self.base_url, agent_id
622        );
623        let response = self.client.post(&url).json(&request).send().await?;
624        self.handle_response(response).await
625    }
626
627    /// Submit feedback on a memory recall
628    pub async fn memory_feedback(
629        &self,
630        agent_id: &str,
631        request: FeedbackRequest,
632    ) -> Result<LegacyFeedbackResponse> {
633        let url = format!("{}/v1/agents/{}/memories/feedback", self.base_url, agent_id);
634        let response = self.client.post(&url).json(&request).send().await?;
635        self.handle_response(response).await
636    }
637
638    // ========================================================================
639    // Memory Feedback Loop — INT-1
640    // ========================================================================
641
642    /// Submit upvote/downvote/flag feedback on a memory (INT-1).
643    ///
644    /// # Arguments
645    /// * `memory_id` – The memory to give feedback on.
646    /// * `agent_id` – The agent that owns the memory.
647    /// * `signal` – [`FeedbackSignal`] value: `Upvote`, `Downvote`, or `Flag`.
648    ///
649    /// # Example
650    /// ```no_run
651    /// # use dakera_client::{DakeraClient, FeedbackSignal};
652    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
653    /// let resp = client.feedback_memory("mem-abc", "agent-1", FeedbackSignal::Upvote).await?;
654    /// println!("new importance: {}", resp.new_importance);
655    /// # Ok(()) }
656    /// ```
657    pub async fn feedback_memory(
658        &self,
659        memory_id: &str,
660        agent_id: &str,
661        signal: FeedbackSignal,
662    ) -> Result<FeedbackResponse> {
663        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
664        let body = MemoryFeedbackBody {
665            agent_id: agent_id.to_string(),
666            signal,
667        };
668        let response = self.client.post(&url).json(&body).send().await?;
669        self.handle_response(response).await
670    }
671
672    /// Get the full feedback history for a memory (INT-1).
673    pub async fn get_memory_feedback_history(
674        &self,
675        memory_id: &str,
676    ) -> Result<FeedbackHistoryResponse> {
677        let url = format!("{}/v1/memories/{}/feedback", self.base_url, memory_id);
678        let response = self.client.get(&url).send().await?;
679        self.handle_response(response).await
680    }
681
682    /// Get aggregate feedback counts and health score for an agent (INT-1).
683    pub async fn get_agent_feedback_summary(&self, agent_id: &str) -> Result<AgentFeedbackSummary> {
684        let url = format!("{}/v1/agents/{}/feedback/summary", self.base_url, agent_id);
685        let response = self.client.get(&url).send().await?;
686        self.handle_response(response).await
687    }
688
689    /// Directly override a memory's importance score (INT-1).
690    ///
691    /// # Arguments
692    /// * `memory_id` – The memory to update.
693    /// * `agent_id` – The agent that owns the memory.
694    /// * `importance` – New importance value (0.0–1.0).
695    pub async fn patch_memory_importance(
696        &self,
697        memory_id: &str,
698        agent_id: &str,
699        importance: f32,
700    ) -> Result<FeedbackResponse> {
701        let url = format!("{}/v1/memories/{}/importance", self.base_url, memory_id);
702        let body = MemoryImportancePatch {
703            agent_id: agent_id.to_string(),
704            importance,
705        };
706        let response = self.client.patch(&url).json(&body).send().await?;
707        self.handle_response(response).await
708    }
709
710    /// Get overall feedback health score for an agent (INT-1).
711    ///
712    /// The health score is the mean importance of all non-expired memories (0.0–1.0).
713    /// A higher score indicates a healthier, more relevant memory store.
714    pub async fn get_feedback_health(&self, agent_id: &str) -> Result<FeedbackHealthResponse> {
715        let url = format!("{}/v1/feedback/health?agent_id={}", self.base_url, agent_id);
716        let response = self.client.get(&url).send().await?;
717        self.handle_response(response).await
718    }
719
720    // ========================================================================
721    // Memory Knowledge Graph Operations (CE-5 / SDK-9)
722    // ========================================================================
723
724    /// Traverse the knowledge graph from a memory node.
725    ///
726    /// Requires CE-5 (Memory Knowledge Graph) on the server.
727    ///
728    /// # Arguments
729    /// * `memory_id` – Root memory ID to start traversal from.
730    /// * `options` – Traversal options (depth, edge type filters).
731    ///
732    /// # Example
733    /// ```no_run
734    /// # use dakera_client::{DakeraClient, GraphOptions};
735    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
736    /// let graph = client.memory_graph("mem-abc", GraphOptions::new().depth(2)).await?;
737    /// println!("{} nodes, {} edges", graph.nodes.len(), graph.edges.len());
738    /// # Ok(()) }
739    /// ```
740    pub async fn memory_graph(
741        &self,
742        memory_id: &str,
743        options: GraphOptions,
744    ) -> Result<MemoryGraph> {
745        let mut url = format!("{}/v1/memories/{}/graph", self.base_url, memory_id);
746        let depth = options.depth.unwrap_or(1);
747        url.push_str(&format!("?depth={}", depth));
748        if let Some(types) = &options.types {
749            let type_strs: Vec<String> = types
750                .iter()
751                .map(|t| {
752                    serde_json::to_value(t)
753                        .unwrap()
754                        .as_str()
755                        .unwrap_or("")
756                        .to_string()
757                })
758                .collect();
759            if !type_strs.is_empty() {
760                url.push_str(&format!("&types={}", type_strs.join(",")));
761            }
762        }
763        let response = self.client.get(&url).send().await?;
764        self.handle_response(response).await
765    }
766
767    /// Find the shortest path between two memories in the knowledge graph.
768    ///
769    /// Requires CE-5 (Memory Knowledge Graph) on the server.
770    ///
771    /// # Example
772    /// ```no_run
773    /// # use dakera_client::DakeraClient;
774    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
775    /// let path = client.memory_path("mem-abc", "mem-xyz").await?;
776    /// println!("{} hops: {:?}", path.hops, path.path);
777    /// # Ok(()) }
778    /// ```
779    pub async fn memory_path(&self, source_id: &str, target_id: &str) -> Result<GraphPath> {
780        let url = format!(
781            "{}/v1/memories/{}/path?target={}",
782            self.base_url,
783            source_id,
784            urlencoding::encode(target_id)
785        );
786        let response = self.client.get(&url).send().await?;
787        self.handle_response(response).await
788    }
789
790    /// Create an explicit edge between two memories.
791    ///
792    /// Requires CE-5 (Memory Knowledge Graph) on the server.
793    ///
794    /// # Example
795    /// ```no_run
796    /// # use dakera_client::{DakeraClient, EdgeType};
797    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
798    /// let resp = client.memory_link("mem-abc", "mem-xyz", EdgeType::LinkedBy).await?;
799    /// println!("Created edge: {}", resp.edge.id);
800    /// # Ok(()) }
801    /// ```
802    pub async fn memory_link(
803        &self,
804        source_id: &str,
805        target_id: &str,
806        edge_type: EdgeType,
807    ) -> Result<GraphLinkResponse> {
808        let url = format!("{}/v1/memories/{}/links", self.base_url, source_id);
809        let request = GraphLinkRequest {
810            target_id: target_id.to_string(),
811            edge_type,
812        };
813        let response = self.client.post(&url).json(&request).send().await?;
814        self.handle_response(response).await
815    }
816
817    /// Export the full knowledge graph for an agent.
818    ///
819    /// Requires CE-5 (Memory Knowledge Graph) on the server.
820    ///
821    /// # Arguments
822    /// * `agent_id` – Agent whose graph to export.
823    /// * `format` – Export format: `"json"` (default), `"graphml"`, or `"csv"`.
824    pub async fn agent_graph_export(&self, agent_id: &str, format: &str) -> Result<GraphExport> {
825        let url = format!(
826            "{}/v1/agents/{}/graph/export?format={}",
827            self.base_url, agent_id, format
828        );
829        let response = self.client.get(&url).send().await?;
830        self.handle_response(response).await
831    }
832
833    // ========================================================================
834    // Session Operations
835    // ========================================================================
836
837    /// Start a new session for an agent
838    pub async fn start_session(&self, agent_id: &str) -> Result<Session> {
839        let url = format!("{}/v1/sessions/start", self.base_url);
840        let request = SessionStartRequest {
841            agent_id: agent_id.to_string(),
842            metadata: None,
843        };
844        let response = self.client.post(&url).json(&request).send().await?;
845        self.handle_response(response).await
846    }
847
848    /// Start a session with metadata
849    pub async fn start_session_with_metadata(
850        &self,
851        agent_id: &str,
852        metadata: serde_json::Value,
853    ) -> Result<Session> {
854        let url = format!("{}/v1/sessions/start", self.base_url);
855        let request = SessionStartRequest {
856            agent_id: agent_id.to_string(),
857            metadata: Some(metadata),
858        };
859        let response = self.client.post(&url).json(&request).send().await?;
860        self.handle_response(response).await
861    }
862
863    /// End a session, optionally with a summary
864    pub async fn end_session(&self, session_id: &str, summary: Option<String>) -> Result<Session> {
865        let url = format!("{}/v1/sessions/{}/end", self.base_url, session_id);
866        let request = SessionEndRequest { summary };
867        let response = self.client.post(&url).json(&request).send().await?;
868        self.handle_response(response).await
869    }
870
871    /// Get a session by ID
872    pub async fn get_session(&self, session_id: &str) -> Result<Session> {
873        let url = format!("{}/v1/sessions/{}", self.base_url, session_id);
874        let response = self.client.get(&url).send().await?;
875        self.handle_response(response).await
876    }
877
878    /// List sessions for an agent
879    pub async fn list_sessions(&self, agent_id: &str) -> Result<Vec<Session>> {
880        let url = format!("{}/v1/sessions?agent_id={}", self.base_url, agent_id);
881        let response = self.client.get(&url).send().await?;
882        self.handle_response(response).await
883    }
884
885    /// Get memories in a session
886    pub async fn session_memories(&self, session_id: &str) -> Result<RecallResponse> {
887        let url = format!("{}/v1/sessions/{}/memories", self.base_url, session_id);
888        let response = self.client.get(&url).send().await?;
889        self.handle_response(response).await
890    }
891
892    // ========================================================================
893    // CE-2: Batch Recall / Forget
894    // ========================================================================
895
896    /// Bulk-recall memories using filter predicates (CE-2).
897    ///
898    /// Uses `POST /v1/memories/recall/batch` — no embedding required.
899    ///
900    /// # Example
901    ///
902    /// ```rust,no_run
903    /// use dakera_client::{DakeraClient, memory::{BatchRecallRequest, BatchMemoryFilter}};
904    ///
905    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
906    /// let client = DakeraClient::new("http://localhost:3000")?;
907    ///
908    /// let filter = BatchMemoryFilter::default().with_min_importance(0.7);
909    /// let req = BatchRecallRequest::new("agent-1").with_filter(filter).with_limit(50);
910    /// let resp = client.batch_recall(req).await?;
911    /// println!("Found {} memories", resp.filtered);
912    /// # Ok(())
913    /// # }
914    /// ```
915    pub async fn batch_recall(&self, request: BatchRecallRequest) -> Result<BatchRecallResponse> {
916        let url = format!("{}/v1/memories/recall/batch", self.base_url);
917        let response = self.client.post(&url).json(&request).send().await?;
918        self.handle_response(response).await
919    }
920
921    /// Bulk-delete memories using filter predicates (CE-2).
922    ///
923    /// Uses `DELETE /v1/memories/forget/batch`.  The server requires at least
924    /// one filter predicate to be set as a safety guard.
925    ///
926    /// # Example
927    ///
928    /// ```rust,no_run
929    /// use dakera_client::{DakeraClient, memory::{BatchForgetRequest, BatchMemoryFilter}};
930    ///
931    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
932    /// let client = DakeraClient::new("http://localhost:3000")?;
933    ///
934    /// let filter = BatchMemoryFilter::default().with_min_importance(0.0).with_max_importance(0.2);
935    /// let resp = client.batch_forget(BatchForgetRequest::new("agent-1", filter)).await?;
936    /// println!("Deleted {} memories", resp.deleted_count);
937    /// # Ok(())
938    /// # }
939    /// ```
940    pub async fn batch_forget(&self, request: BatchForgetRequest) -> Result<BatchForgetResponse> {
941        let url = format!("{}/v1/memories/forget/batch", self.base_url);
942        let response = self.client.delete(&url).json(&request).send().await?;
943        self.handle_response(response).await
944    }
945}