// dakera_client/memory.rs
1//! Memory-oriented client methods for Dakera AI Agent Memory Platform
2//!
3//! Provides high-level methods for storing, recalling, and managing
4//! agent memories and sessions through the Dakera API.
5
6use serde::{Deserialize, Serialize};
7
8use crate::error::Result;
9use crate::types::{
10    EdgeType, GraphExport, GraphLinkRequest, GraphLinkResponse, GraphOptions, GraphPath,
11    MemoryGraph,
12};
13use crate::DakeraClient;
14
15// ============================================================================
16// Memory Types (client-side)
17// ============================================================================
18
19/// Memory type classification
20#[derive(Debug, Clone, Serialize, Deserialize, Default)]
21pub enum MemoryType {
22    #[default]
23    Episodic,
24    Semantic,
25    Procedural,
26    Working,
27}
28
/// Store a memory request
///
/// Request body for `POST /v1/memory/store` (see [`DakeraClient::store_memory`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoreMemoryRequest {
    /// Agent whose namespace the memory is stored under.
    pub agent_id: String,
    /// Free-text content of the memory.
    pub content: String,
    /// Classification; `MemoryType::Episodic` when omitted.
    #[serde(default)]
    pub memory_type: MemoryType,
    /// Importance score; serde default is 0.5 (see `default_importance`).
    #[serde(default = "default_importance")]
    pub importance: f32,
    /// Tags for later filtering; empty when omitted.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Session to attach the memory to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Arbitrary JSON metadata attached to the memory.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Optional TTL in seconds. The memory is hard-deleted after this many
    /// seconds from creation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ttl_seconds: Option<u64>,
    /// Optional explicit expiry as a Unix timestamp (seconds). Takes precedence
    /// over `ttl_seconds` when both are set. The memory is hard-deleted by the
    /// decay engine on expiry (DECAY-3).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expires_at: Option<u64>,
}
54
/// Serde default for [`StoreMemoryRequest::importance`]: the midpoint 0.5.
fn default_importance() -> f32 {
    0.5_f32
}
58
59impl StoreMemoryRequest {
60    /// Create a new store memory request
61    pub fn new(agent_id: impl Into<String>, content: impl Into<String>) -> Self {
62        Self {
63            agent_id: agent_id.into(),
64            content: content.into(),
65            memory_type: MemoryType::default(),
66            importance: 0.5,
67            tags: Vec::new(),
68            session_id: None,
69            metadata: None,
70            ttl_seconds: None,
71            expires_at: None,
72        }
73    }
74
75    /// Set memory type
76    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
77        self.memory_type = memory_type;
78        self
79    }
80
81    /// Set importance score
82    pub fn with_importance(mut self, importance: f32) -> Self {
83        self.importance = importance.clamp(0.0, 1.0);
84        self
85    }
86
87    /// Set tags
88    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
89        self.tags = tags;
90        self
91    }
92
93    /// Set session ID
94    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
95        self.session_id = Some(session_id.into());
96        self
97    }
98
99    /// Set metadata
100    pub fn with_metadata(mut self, metadata: serde_json::Value) -> Self {
101        self.metadata = Some(metadata);
102        self
103    }
104
105    /// Set TTL in seconds. The memory is hard-deleted after this many seconds
106    /// from creation.
107    pub fn with_ttl(mut self, ttl_seconds: u64) -> Self {
108        self.ttl_seconds = Some(ttl_seconds);
109        self
110    }
111
112    /// Set an explicit expiry Unix timestamp (seconds). Takes precedence over
113    /// `ttl_seconds` when both are set (DECAY-3).
114    pub fn with_expires_at(mut self, expires_at: u64) -> Self {
115        self.expires_at = Some(expires_at);
116        self
117    }
118}
119
/// Stored memory response
///
/// Returned by `POST /v1/memory/store`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoreMemoryResponse {
    /// ID assigned to the newly stored memory.
    pub memory_id: String,
    /// Agent the memory belongs to.
    pub agent_id: String,
    /// Namespace the memory was stored in.
    pub namespace: String,
}
127
/// Recall memories request
///
/// Request body for `POST /v1/memory/recall` and `/v1/memory/search`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallRequest {
    /// Agent whose memory namespace to search.
    pub agent_id: String,
    /// Semantic query text.
    pub query: String,
    /// Maximum number of results; serde default is 5 (see `default_top_k`).
    #[serde(default = "default_top_k")]
    pub top_k: usize,
    /// Optional filter by memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Minimum importance threshold; 0.0 (no filtering) when omitted.
    #[serde(default)]
    pub min_importance: f32,
    /// Optional filter by session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Optional filter by tags; empty means no tag filtering.
    #[serde(default)]
    pub tags: Vec<String>,
}
144
/// Serde default for [`RecallRequest::top_k`]: five results.
fn default_top_k() -> usize {
    5_usize
}
148
149impl RecallRequest {
150    /// Create a new recall request
151    pub fn new(agent_id: impl Into<String>, query: impl Into<String>) -> Self {
152        Self {
153            agent_id: agent_id.into(),
154            query: query.into(),
155            top_k: 5,
156            memory_type: None,
157            min_importance: 0.0,
158            session_id: None,
159            tags: Vec::new(),
160        }
161    }
162
163    /// Set number of results
164    pub fn with_top_k(mut self, top_k: usize) -> Self {
165        self.top_k = top_k;
166        self
167    }
168
169    /// Filter by memory type
170    pub fn with_type(mut self, memory_type: MemoryType) -> Self {
171        self.memory_type = Some(memory_type);
172        self
173    }
174
175    /// Set minimum importance threshold
176    pub fn with_min_importance(mut self, min: f32) -> Self {
177        self.min_importance = min;
178        self
179    }
180
181    /// Filter by session
182    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
183        self.session_id = Some(session_id.into());
184        self
185    }
186
187    /// Filter by tags
188    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
189        self.tags = tags;
190        self
191    }
192}
193
/// A recalled memory
///
/// Single result entry returned by the recall/search endpoints.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecalledMemory {
    /// Unique memory ID.
    pub id: String,
    /// Stored memory content.
    pub content: String,
    /// Classification assigned at store time.
    pub memory_type: MemoryType,
    /// Importance score in `[0.0, 1.0]`.
    pub importance: f32,
    /// Relevance score of this result against the query.
    pub score: f32,
    /// Tags attached to the memory; empty when none.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Session the memory belongs to, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Arbitrary JSON metadata attached at store time.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    // Timestamps are presumably Unix seconds, matching `expires_at` in
    // `StoreMemoryRequest` — TODO(review): confirm against server schema.
    pub created_at: u64,
    pub last_accessed_at: u64,
    /// Number of times the memory has been accessed.
    pub access_count: u32,
}
212
/// Recall response
///
/// Returned by the recall/search endpoints.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecallResponse {
    /// Matching memories, best first.
    pub memories: Vec<RecalledMemory>,
    /// Total number of matches found (may exceed `memories.len()`).
    pub total_found: usize,
}
219
/// Forget (delete) memories request
///
/// Request body for `POST /v1/memory/forget`. The selector fields below
/// identify which memories to delete; see the constructors for common shapes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetRequest {
    /// Agent whose memories to delete.
    pub agent_id: String,
    /// Specific memory IDs to delete; empty when selecting by other fields.
    #[serde(default)]
    pub memory_ids: Vec<String>,
    /// Delete memories carrying these tags; empty when unused.
    #[serde(default)]
    pub tags: Vec<String>,
    /// Delete memories in this session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    /// Delete memories created before this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub before_timestamp: Option<u64>,
}
233
234impl ForgetRequest {
235    /// Forget specific memories by ID
236    pub fn by_ids(agent_id: impl Into<String>, ids: Vec<String>) -> Self {
237        Self {
238            agent_id: agent_id.into(),
239            memory_ids: ids,
240            tags: Vec::new(),
241            session_id: None,
242            before_timestamp: None,
243        }
244    }
245
246    /// Forget memories with specific tags
247    pub fn by_tags(agent_id: impl Into<String>, tags: Vec<String>) -> Self {
248        Self {
249            agent_id: agent_id.into(),
250            memory_ids: Vec::new(),
251            tags,
252            session_id: None,
253            before_timestamp: None,
254        }
255    }
256
257    /// Forget all memories in a session
258    pub fn by_session(agent_id: impl Into<String>, session_id: impl Into<String>) -> Self {
259        Self {
260            agent_id: agent_id.into(),
261            memory_ids: Vec::new(),
262            tags: Vec::new(),
263            session_id: Some(session_id.into()),
264            before_timestamp: None,
265        }
266    }
267}
268
/// Forget response
///
/// Returned by `POST /v1/memory/forget`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ForgetResponse {
    /// Number of memories actually deleted.
    pub deleted_count: u64,
}
274
/// Session start request
///
/// Request body for `POST /v1/sessions/start`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionStartRequest {
    /// Agent the session belongs to.
    pub agent_id: String,
    /// Arbitrary JSON metadata attached to the session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}
282
/// Session information
///
/// Returned by the session endpoints.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    /// Unique session ID.
    pub id: String,
    /// Agent the session belongs to.
    pub agent_id: String,
    /// Start time — presumably a Unix timestamp in seconds; TODO(review)
    /// confirm against server schema.
    pub started_at: u64,
    /// End time; `None` while the session is still open.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ended_at: Option<u64>,
    /// Summary supplied when the session was ended, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
    /// Arbitrary JSON metadata supplied at session start.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}
296
/// Session end request
///
/// Request body for `POST /v1/sessions/{id}/end`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionEndRequest {
    /// Optional free-text summary recorded on the session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
}
303
/// Request to update a memory
///
/// All fields are optional; only the fields present are changed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateMemoryRequest {
    /// Replacement content, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// Replacement metadata, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// Replacement classification, if changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
}
314
/// Request to update memory importance
///
/// Applies one importance value to every listed memory.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateImportanceRequest {
    /// Memories to update.
    pub memory_ids: Vec<String>,
    /// New importance score to assign to all of them.
    pub importance: f32,
}
321
/// Request to consolidate memories
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsolidateRequest {
    /// Restrict consolidation to one memory type.
    // NOTE(review): this is a raw String, unlike the typed
    // `Option<MemoryType>` used elsewhere — confirm the server contract.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<String>,
    /// Similarity threshold for merging — semantics defined server-side.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub threshold: Option<f32>,
    /// When true, report what would happen without modifying anything.
    #[serde(default)]
    pub dry_run: bool,
}
332
/// Response from consolidation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsolidateResponse {
    /// Number of memories merged together.
    pub consolidated_count: usize,
    /// Number of source memories removed by the merge.
    pub removed_count: usize,
    /// IDs of the memories created by consolidation.
    pub new_memories: Vec<String>,
}
340
/// Request for memory feedback
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeedbackRequest {
    /// Memory the feedback is about.
    pub memory_id: String,
    /// Free-text feedback — accepted values are defined server-side;
    // NOTE(review): stringly-typed; an enum would be safer if the set is fixed.
    pub feedback: String,
    /// Optional relevance score for the recalled memory.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub relevance_score: Option<f32>,
}
349
/// Response from feedback
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeedbackResponse {
    /// Outcome status string reported by the server.
    pub status: String,
    /// New importance score, if the feedback changed it.
    pub updated_importance: Option<f32>,
}
356
357// ============================================================================
358// CE-2: Batch Recall / Forget Types
359// ============================================================================
360
/// Filter predicates for batch memory operations (CE-2).
///
/// All fields are optional and combine conjunctively. For
/// [`BatchForgetRequest`] at least one must be set (server-side safety guard).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BatchMemoryFilter {
    /// Restrict to memories that carry **all** listed tags.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<String>>,
    /// Minimum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_importance: Option<f32>,
    /// Maximum importance (inclusive).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_importance: Option<f32>,
    /// Only memories created at or after this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_after: Option<u64>,
    /// Only memories created before or at this Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_before: Option<u64>,
    /// Restrict to a specific memory type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_type: Option<MemoryType>,
    /// Restrict to memories from a specific session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
}
389
390impl BatchMemoryFilter {
391    /// Convenience: filter by tags.
392    pub fn with_tags(mut self, tags: Vec<String>) -> Self {
393        self.tags = Some(tags);
394        self
395    }
396
397    /// Convenience: filter by minimum importance.
398    pub fn with_min_importance(mut self, min: f32) -> Self {
399        self.min_importance = Some(min);
400        self
401    }
402
403    /// Convenience: filter by maximum importance.
404    pub fn with_max_importance(mut self, max: f32) -> Self {
405        self.max_importance = Some(max);
406        self
407    }
408
409    /// Convenience: filter by session.
410    pub fn with_session(mut self, session_id: impl Into<String>) -> Self {
411        self.session_id = Some(session_id.into());
412        self
413    }
414}
415
/// Request body for `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallRequest {
    /// Agent whose memory namespace to search.
    pub agent_id: String,
    /// Filter predicates to apply.
    #[serde(default)]
    pub filter: BatchMemoryFilter,
    /// Maximum number of results to return (default: 100).
    #[serde(default = "default_batch_limit")]
    pub limit: usize,
}
428
/// Serde default for [`BatchRecallRequest::limit`]: 100 results.
fn default_batch_limit() -> usize {
    100_usize
}
432
433impl BatchRecallRequest {
434    /// Create a new batch recall request for an agent.
435    pub fn new(agent_id: impl Into<String>) -> Self {
436        Self {
437            agent_id: agent_id.into(),
438            filter: BatchMemoryFilter::default(),
439            limit: 100,
440        }
441    }
442
443    /// Set filter predicates.
444    pub fn with_filter(mut self, filter: BatchMemoryFilter) -> Self {
445        self.filter = filter;
446        self
447    }
448
449    /// Set result limit.
450    pub fn with_limit(mut self, limit: usize) -> Self {
451        self.limit = limit;
452        self
453    }
454}
455
/// Response from `POST /v1/memories/recall/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchRecallResponse {
    /// Memories that passed the filter, up to the requested limit.
    pub memories: Vec<RecalledMemory>,
    /// Total memories in the agent namespace.
    pub total: usize,
    /// Number of memories that passed the filter.
    pub filtered: usize,
}
465
/// Request body for `DELETE /v1/memories/forget/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetRequest {
    /// Agent whose memory namespace to purge from.
    pub agent_id: String,
    /// Filter predicates — **at least one must be set** (server safety guard).
    pub filter: BatchMemoryFilter,
}
474
475impl BatchForgetRequest {
476    /// Create a new batch forget request with the given filter.
477    pub fn new(agent_id: impl Into<String>, filter: BatchMemoryFilter) -> Self {
478        Self {
479            agent_id: agent_id.into(),
480            filter,
481        }
482    }
483}
484
/// Response from `DELETE /v1/memories/forget/batch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchForgetResponse {
    /// Number of memories deleted by the batch operation.
    pub deleted_count: usize,
}
490
491// ============================================================================
492// Memory Client Methods
493// ============================================================================
494
495impl DakeraClient {
496    // ========================================================================
497    // Memory Operations
498    // ========================================================================
499
500    /// Store a memory for an agent
501    ///
502    /// # Example
503    ///
504    /// ```rust,no_run
505    /// use dakera_client::{DakeraClient, memory::StoreMemoryRequest};
506    ///
507    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
508    /// let client = DakeraClient::new("http://localhost:3000")?;
509    ///
510    /// let request = StoreMemoryRequest::new("agent-1", "The user prefers dark mode")
511    ///     .with_importance(0.8)
512    ///     .with_tags(vec!["preferences".to_string()]);
513    ///
514    /// let response = client.store_memory(request).await?;
515    /// println!("Stored memory: {}", response.memory_id);
516    /// # Ok(())
517    /// # }
518    /// ```
519    pub async fn store_memory(&self, request: StoreMemoryRequest) -> Result<StoreMemoryResponse> {
520        let url = format!("{}/v1/memory/store", self.base_url);
521        let response = self.client.post(&url).json(&request).send().await?;
522        self.handle_response(response).await
523    }
524
525    /// Recall memories by semantic query
526    ///
527    /// # Example
528    ///
529    /// ```rust,no_run
530    /// use dakera_client::{DakeraClient, memory::RecallRequest};
531    ///
532    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
533    /// let client = DakeraClient::new("http://localhost:3000")?;
534    ///
535    /// let request = RecallRequest::new("agent-1", "user preferences")
536    ///     .with_top_k(10);
537    ///
538    /// let response = client.recall(request).await?;
539    /// for memory in response.memories {
540    ///     println!("{}: {} (score: {})", memory.id, memory.content, memory.score);
541    /// }
542    /// # Ok(())
543    /// # }
544    /// ```
545    pub async fn recall(&self, request: RecallRequest) -> Result<RecallResponse> {
546        let url = format!("{}/v1/memory/recall", self.base_url);
547        let response = self.client.post(&url).json(&request).send().await?;
548        self.handle_response(response).await
549    }
550
551    /// Simple recall with just agent_id and query (convenience method)
552    pub async fn recall_simple(
553        &self,
554        agent_id: &str,
555        query: &str,
556        top_k: usize,
557    ) -> Result<RecallResponse> {
558        self.recall(RecallRequest::new(agent_id, query).with_top_k(top_k))
559            .await
560    }
561
562    /// Get a specific memory by ID
563    pub async fn get_memory(&self, memory_id: &str) -> Result<RecalledMemory> {
564        let url = format!("{}/v1/memory/get/{}", self.base_url, memory_id);
565        let response = self.client.get(&url).send().await?;
566        self.handle_response(response).await
567    }
568
569    /// Forget (delete) memories
570    pub async fn forget(&self, request: ForgetRequest) -> Result<ForgetResponse> {
571        let url = format!("{}/v1/memory/forget", self.base_url);
572        let response = self.client.post(&url).json(&request).send().await?;
573        self.handle_response(response).await
574    }
575
576    /// Search memories with advanced filters
577    pub async fn search_memories(&self, request: RecallRequest) -> Result<RecallResponse> {
578        let url = format!("{}/v1/memory/search", self.base_url);
579        let response = self.client.post(&url).json(&request).send().await?;
580        self.handle_response(response).await
581    }
582
583    /// Update an existing memory
584    pub async fn update_memory(
585        &self,
586        agent_id: &str,
587        memory_id: &str,
588        request: UpdateMemoryRequest,
589    ) -> Result<StoreMemoryResponse> {
590        let url = format!(
591            "{}/v1/agents/{}/memories/{}",
592            self.base_url, agent_id, memory_id
593        );
594        let response = self.client.put(&url).json(&request).send().await?;
595        self.handle_response(response).await
596    }
597
598    /// Update importance of memories
599    pub async fn update_importance(
600        &self,
601        agent_id: &str,
602        request: UpdateImportanceRequest,
603    ) -> Result<serde_json::Value> {
604        let url = format!(
605            "{}/v1/agents/{}/memories/importance",
606            self.base_url, agent_id
607        );
608        let response = self.client.put(&url).json(&request).send().await?;
609        self.handle_response(response).await
610    }
611
612    /// Consolidate memories for an agent
613    pub async fn consolidate(
614        &self,
615        agent_id: &str,
616        request: ConsolidateRequest,
617    ) -> Result<ConsolidateResponse> {
618        let url = format!(
619            "{}/v1/agents/{}/memories/consolidate",
620            self.base_url, agent_id
621        );
622        let response = self.client.post(&url).json(&request).send().await?;
623        self.handle_response(response).await
624    }
625
626    /// Submit feedback on a memory recall
627    pub async fn memory_feedback(
628        &self,
629        agent_id: &str,
630        request: FeedbackRequest,
631    ) -> Result<FeedbackResponse> {
632        let url = format!("{}/v1/agents/{}/memories/feedback", self.base_url, agent_id);
633        let response = self.client.post(&url).json(&request).send().await?;
634        self.handle_response(response).await
635    }
636
637    // ========================================================================
638    // Memory Knowledge Graph Operations (CE-5 / SDK-9)
639    // ========================================================================
640
641    /// Traverse the knowledge graph from a memory node.
642    ///
643    /// Requires CE-5 (Memory Knowledge Graph) on the server.
644    ///
645    /// # Arguments
646    /// * `memory_id` – Root memory ID to start traversal from.
647    /// * `options` – Traversal options (depth, edge type filters).
648    ///
649    /// # Example
650    /// ```no_run
651    /// # use dakera_client::{DakeraClient, GraphOptions};
652    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
653    /// let graph = client.memory_graph("mem-abc", GraphOptions::new().depth(2)).await?;
654    /// println!("{} nodes, {} edges", graph.nodes.len(), graph.edges.len());
655    /// # Ok(()) }
656    /// ```
657    pub async fn memory_graph(
658        &self,
659        memory_id: &str,
660        options: GraphOptions,
661    ) -> Result<MemoryGraph> {
662        let mut url = format!("{}/v1/memories/{}/graph", self.base_url, memory_id);
663        let depth = options.depth.unwrap_or(1);
664        url.push_str(&format!("?depth={}", depth));
665        if let Some(types) = &options.types {
666            let type_strs: Vec<String> = types
667                .iter()
668                .map(|t| {
669                    serde_json::to_value(t)
670                        .unwrap()
671                        .as_str()
672                        .unwrap_or("")
673                        .to_string()
674                })
675                .collect();
676            if !type_strs.is_empty() {
677                url.push_str(&format!("&types={}", type_strs.join(",")));
678            }
679        }
680        let response = self.client.get(&url).send().await?;
681        self.handle_response(response).await
682    }
683
684    /// Find the shortest path between two memories in the knowledge graph.
685    ///
686    /// Requires CE-5 (Memory Knowledge Graph) on the server.
687    ///
688    /// # Example
689    /// ```no_run
690    /// # use dakera_client::DakeraClient;
691    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
692    /// let path = client.memory_path("mem-abc", "mem-xyz").await?;
693    /// println!("{} hops: {:?}", path.hops, path.path);
694    /// # Ok(()) }
695    /// ```
696    pub async fn memory_path(&self, source_id: &str, target_id: &str) -> Result<GraphPath> {
697        let url = format!(
698            "{}/v1/memories/{}/path?target={}",
699            self.base_url,
700            source_id,
701            urlencoding::encode(target_id)
702        );
703        let response = self.client.get(&url).send().await?;
704        self.handle_response(response).await
705    }
706
707    /// Create an explicit edge between two memories.
708    ///
709    /// Requires CE-5 (Memory Knowledge Graph) on the server.
710    ///
711    /// # Example
712    /// ```no_run
713    /// # use dakera_client::{DakeraClient, EdgeType};
714    /// # async fn example(client: &DakeraClient) -> dakera_client::Result<()> {
715    /// let resp = client.memory_link("mem-abc", "mem-xyz", EdgeType::LinkedBy).await?;
716    /// println!("Created edge: {}", resp.edge.id);
717    /// # Ok(()) }
718    /// ```
719    pub async fn memory_link(
720        &self,
721        source_id: &str,
722        target_id: &str,
723        edge_type: EdgeType,
724    ) -> Result<GraphLinkResponse> {
725        let url = format!("{}/v1/memories/{}/links", self.base_url, source_id);
726        let request = GraphLinkRequest {
727            target_id: target_id.to_string(),
728            edge_type,
729        };
730        let response = self.client.post(&url).json(&request).send().await?;
731        self.handle_response(response).await
732    }
733
734    /// Export the full knowledge graph for an agent.
735    ///
736    /// Requires CE-5 (Memory Knowledge Graph) on the server.
737    ///
738    /// # Arguments
739    /// * `agent_id` – Agent whose graph to export.
740    /// * `format` – Export format: `"json"` (default), `"graphml"`, or `"csv"`.
741    pub async fn agent_graph_export(&self, agent_id: &str, format: &str) -> Result<GraphExport> {
742        let url = format!(
743            "{}/v1/agents/{}/graph/export?format={}",
744            self.base_url, agent_id, format
745        );
746        let response = self.client.get(&url).send().await?;
747        self.handle_response(response).await
748    }
749
750    // ========================================================================
751    // Session Operations
752    // ========================================================================
753
754    /// Start a new session for an agent
755    pub async fn start_session(&self, agent_id: &str) -> Result<Session> {
756        let url = format!("{}/v1/sessions/start", self.base_url);
757        let request = SessionStartRequest {
758            agent_id: agent_id.to_string(),
759            metadata: None,
760        };
761        let response = self.client.post(&url).json(&request).send().await?;
762        self.handle_response(response).await
763    }
764
765    /// Start a session with metadata
766    pub async fn start_session_with_metadata(
767        &self,
768        agent_id: &str,
769        metadata: serde_json::Value,
770    ) -> Result<Session> {
771        let url = format!("{}/v1/sessions/start", self.base_url);
772        let request = SessionStartRequest {
773            agent_id: agent_id.to_string(),
774            metadata: Some(metadata),
775        };
776        let response = self.client.post(&url).json(&request).send().await?;
777        self.handle_response(response).await
778    }
779
780    /// End a session, optionally with a summary
781    pub async fn end_session(&self, session_id: &str, summary: Option<String>) -> Result<Session> {
782        let url = format!("{}/v1/sessions/{}/end", self.base_url, session_id);
783        let request = SessionEndRequest { summary };
784        let response = self.client.post(&url).json(&request).send().await?;
785        self.handle_response(response).await
786    }
787
788    /// Get a session by ID
789    pub async fn get_session(&self, session_id: &str) -> Result<Session> {
790        let url = format!("{}/v1/sessions/{}", self.base_url, session_id);
791        let response = self.client.get(&url).send().await?;
792        self.handle_response(response).await
793    }
794
795    /// List sessions for an agent
796    pub async fn list_sessions(&self, agent_id: &str) -> Result<Vec<Session>> {
797        let url = format!("{}/v1/sessions?agent_id={}", self.base_url, agent_id);
798        let response = self.client.get(&url).send().await?;
799        self.handle_response(response).await
800    }
801
802    /// Get memories in a session
803    pub async fn session_memories(&self, session_id: &str) -> Result<RecallResponse> {
804        let url = format!("{}/v1/sessions/{}/memories", self.base_url, session_id);
805        let response = self.client.get(&url).send().await?;
806        self.handle_response(response).await
807    }
808
809    // ========================================================================
810    // CE-2: Batch Recall / Forget
811    // ========================================================================
812
813    /// Bulk-recall memories using filter predicates (CE-2).
814    ///
815    /// Uses `POST /v1/memories/recall/batch` — no embedding required.
816    ///
817    /// # Example
818    ///
819    /// ```rust,no_run
820    /// use dakera_client::{DakeraClient, memory::{BatchRecallRequest, BatchMemoryFilter}};
821    ///
822    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
823    /// let client = DakeraClient::new("http://localhost:3000")?;
824    ///
825    /// let filter = BatchMemoryFilter::default().with_min_importance(0.7);
826    /// let req = BatchRecallRequest::new("agent-1").with_filter(filter).with_limit(50);
827    /// let resp = client.batch_recall(req).await?;
828    /// println!("Found {} memories", resp.filtered);
829    /// # Ok(())
830    /// # }
831    /// ```
832    pub async fn batch_recall(&self, request: BatchRecallRequest) -> Result<BatchRecallResponse> {
833        let url = format!("{}/v1/memories/recall/batch", self.base_url);
834        let response = self.client.post(&url).json(&request).send().await?;
835        self.handle_response(response).await
836    }
837
838    /// Bulk-delete memories using filter predicates (CE-2).
839    ///
840    /// Uses `DELETE /v1/memories/forget/batch`.  The server requires at least
841    /// one filter predicate to be set as a safety guard.
842    ///
843    /// # Example
844    ///
845    /// ```rust,no_run
846    /// use dakera_client::{DakeraClient, memory::{BatchForgetRequest, BatchMemoryFilter}};
847    ///
848    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
849    /// let client = DakeraClient::new("http://localhost:3000")?;
850    ///
851    /// let filter = BatchMemoryFilter::default().with_min_importance(0.0).with_max_importance(0.2);
852    /// let resp = client.batch_forget(BatchForgetRequest::new("agent-1", filter)).await?;
853    /// println!("Deleted {} memories", resp.deleted_count);
854    /// # Ok(())
855    /// # }
856    /// ```
857    pub async fn batch_forget(&self, request: BatchForgetRequest) -> Result<BatchForgetResponse> {
858        let url = format!("{}/v1/memories/forget/batch", self.base_url);
859        let response = self.client.delete(&url).json(&request).send().await?;
860        self.handle_response(response).await
861    }
862}