// totalreclaw_memory/backend.rs
//! ZeroClaw Memory trait implementation.
//!
//! `TotalReclawMemory` is the main entry point for using TotalReclaw as a
//! ZeroClaw memory backend. It implements the `Memory` trait, providing:
//!
//! - E2E encrypted storage on-chain via The Graph subgraph
//! - Semantic search with BM25 + Cosine + RRF reranking
//! - LSH blind indexing for server-blind similarity search
//! - Portable: same recovery phrase works across ZeroClaw, OpenClaw, Claude Desktop, Hermes
//!
//! ## ZeroClaw Integration
//!
//! Register in ZeroClaw's factory (`src/memory/backend.rs`):
//! ```ignore
//! MemoryBackendKind::TotalReclaw => {
//!     Box::new(TotalReclawMemory::new(config).await?)
//! }
//! ```
//!
//! Configure in `~/.zeroclaw/config.toml`:
//! ```toml
//! [memory]
//! backend = "totalreclaw"
//!
//! [memory.totalreclaw]
//! recovery_phrase_path = "~/.totalreclaw/credentials.json"
//! embedding_config_path = "~/.totalreclaw/embedding-config.json"
//! relay_url = "https://api.totalreclaw.xyz"
//! ```
//!
//! ## Category Mapping (TotalReclaw -> ZeroClaw)
//!
//! ZeroClaw applies 7-day half-life decay to non-Core entries at retrieval time.
//! We map TotalReclaw memory types to ZeroClaw categories:
//!
//! | TotalReclaw type | ZeroClaw category | Decay |
//! |------------------|-------------------|-------|
//! | fact             | Core              | None  |
//! | preference       | Core              | None  |
//! | decision         | Core              | None  |
//! | goal             | Core              | None  |
//! | summary          | Core              | None  |
//! | episodic         | Conversation      | 7-day |
//! | context          | Daily             | 7-day |

use base64::Engine;

use crate::billing::{self, BillingCache};
use crate::crypto::{self, DerivedKeys};
use crate::embedding::{self, EmbeddingMode, EmbeddingProvider};
use crate::hotcache::HotCache;
use crate::lsh::LshHasher;
use crate::reranker::{self, Candidate, RerankerConfig};
use crate::relay::{RelayClient, RelayConfig};
use crate::search;
use crate::store;
use crate::wallet;
use crate::Result;
use totalreclaw_core::claims::MemorySource;
/// Relay endpoint used when the configuration does not override it.
const DEFAULT_RELAY_URL: &str = "https://api.totalreclaw.xyz";

64/// Result of parsing the decrypted fact envelope.
65///
66/// ZeroClaw reads both envelope shapes:
67///  * **v0 (pre-2.0)**: `{"t": text, "a": agent_id, "s": source_tag}`
68///    (source_tag is `zeroclaw_{category}` or `openclaw_extraction` etc.)
69///  * **v1 (Memory Taxonomy v1)**: full v1 `ClaimPayload` with `text`,
70///    `type`, `source` (one of `user|user-inferred|assistant|external|derived`),
71///    `scope`, optional `reasoning`, `volatility`, `entities`, `importance`.
72struct DecryptedEnvelope {
73    text: String,
74    category: MemoryCategory,
75    /// Memory Taxonomy v1 provenance source if the envelope is v1; `None`
76    /// for v0 envelopes (read-side back-compat). Used by `rerank_with_config`
77    /// to apply Retrieval v2 Tier 1 source weights.
78    v1_source: Option<MemorySource>,
79}
80
81/// Parse the decrypted fact envelope.
82///
83/// Tries v1 first (ClaimPayload JSON), falls back to v0 (`{t,a,s}` envelope)
84/// on parse failure so pre-2.0 vault entries still decode.
85fn parse_decrypted_envelope(decrypted: &str) -> DecryptedEnvelope {
86    // Try v1 ClaimPayload JSON first. A v1 envelope has `text` and `type`
87    // fields at top level; v0 envelopes use `t`/`a`/`s` short keys.
88    if let Ok(obj) = serde_json::from_str::<serde_json::Value>(decrypted) {
89        let is_v1 = obj.get("text").is_some() && obj.get("type").is_some();
90        if is_v1 {
91            let text = obj
92                .get("text")
93                .and_then(|v| v.as_str())
94                .unwrap_or(decrypted)
95                .to_string();
96            // v1 source field: literal enum string
97            let v1_source = obj
98                .get("source")
99                .and_then(|v| v.as_str())
100                .and_then(parse_v1_source);
101            // v1 "type" → ZeroClaw category mapping
102            let v1_type = obj
103                .get("type")
104                .and_then(|v| v.as_str())
105                .unwrap_or("claim");
106            let category = v1_type_to_category(v1_type);
107            return DecryptedEnvelope {
108                text,
109                category,
110                v1_source,
111            };
112        }
113
114        // v0 envelope path (pre-2.0)
115        let text = obj
116            .get("t")
117            .and_then(|v| v.as_str())
118            .unwrap_or(decrypted)
119            .to_string();
120        let source = obj.get("s").and_then(|v| v.as_str()).unwrap_or("");
121        let category = category_from_source(source);
122        return DecryptedEnvelope {
123            text,
124            category,
125            v1_source: None,
126        };
127    }
128    // Fallback: treat entire decrypted content as text
129    DecryptedEnvelope {
130        text: decrypted.to_string(),
131        category: MemoryCategory::Core,
132        v1_source: None,
133    }
134}
135
136/// Parse a v1 memory source literal to the core `MemorySource` enum.
137fn parse_v1_source(raw: &str) -> Option<MemorySource> {
138    match raw {
139        "user" => Some(MemorySource::User),
140        "user-inferred" => Some(MemorySource::UserInferred),
141        "assistant" => Some(MemorySource::Assistant),
142        "external" => Some(MemorySource::External),
143        "derived" => Some(MemorySource::Derived),
144        _ => None,
145    }
146}
147
148/// Map a v1 memory type to a ZeroClaw memory category.
149///
150/// Category rules:
151///  * claim, preference, directive, commitment → Core (durable)
152///  * episode → Conversation (7-day decay)
153///  * summary → Core (synthesis is high-value)
154///  * anything else → Core (safe default)
155fn v1_type_to_category(v1_type: &str) -> MemoryCategory {
156    match v1_type {
157        "episode" => MemoryCategory::Conversation,
158        "claim" | "preference" | "directive" | "commitment" | "summary" => {
159            MemoryCategory::Core
160        }
161        _ => MemoryCategory::Core,
162    }
163}
164
165/// Map a source string to a ZeroClaw memory category.
166fn category_from_source(source: &str) -> MemoryCategory {
167    let lower = source.to_lowercase();
168    if lower.contains("conversation") || lower.contains("episodic") {
169        MemoryCategory::Conversation
170    } else if lower.contains("daily") || lower.contains("context") {
171        MemoryCategory::Daily
172    } else {
173        // Core covers: fact, preference, decision, goal, summary, rule, debrief, and unknown
174        MemoryCategory::Core
175    }
176}
177
/// Number of results returned by auto-recall (post-rerank). Kept in sync
/// with every other TotalReclaw client.
const AUTO_RECALL_TOP_K: usize = 8;

// ---------------------------------------------------------------------------
// ZeroClaw-compatible types
// ---------------------------------------------------------------------------

/// Memory category (mirrors ZeroClaw's `MemoryCategory`).
#[derive(Debug, Clone, PartialEq)]
pub enum MemoryCategory {
    Core,
    Daily,
    Conversation,
    Custom(String),
}

impl std::fmt::Display for MemoryCategory {
    /// Render the lowercase wire label; `Custom` prints its inner name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            MemoryCategory::Core => "core",
            MemoryCategory::Daily => "daily",
            MemoryCategory::Conversation => "conversation",
            MemoryCategory::Custom(name) => name.as_str(),
        };
        f.write_str(label)
    }
}

205/// A memory entry (matches ZeroClaw's `MemoryEntry`).
206#[derive(Debug, Clone)]
207pub struct MemoryEntry {
208    pub id: String,
209    pub key: String,
210    pub content: String,
211    pub category: MemoryCategory,
212    pub timestamp: String,
213    pub session_id: Option<String>,
214    pub score: Option<f64>,
215}
216
// ---------------------------------------------------------------------------
// TotalReclawMemory
// ---------------------------------------------------------------------------

221/// TotalReclaw memory backend for ZeroClaw.
222///
223/// Implements the ZeroClaw `Memory` trait with full E2E encryption.
224pub struct TotalReclawMemory {
225    keys: DerivedKeys,
226    lsh_hasher: LshHasher,
227    embedding_provider: Box<dyn EmbeddingProvider>,
228    relay: RelayClient,
229    private_key: [u8; 32],
230    /// In-memory hot cache for recent recall results.
231    hot_cache: std::sync::Mutex<HotCache>,
232}
233
234/// Configuration for creating a TotalReclawMemory instance.
235pub struct TotalReclawConfig {
236    pub mnemonic: String,
237    pub embedding_mode: EmbeddingMode,
238    pub embedding_dims: usize,
239    pub relay_url: String,
240    pub is_test: bool,
241}
242
243impl Default for TotalReclawConfig {
244    fn default() -> Self {
245        Self {
246            mnemonic: String::new(),
247            embedding_mode: EmbeddingMode::Ollama {
248                base_url: "http://localhost:11434".into(),
249                model: "nomic-embed-text".into(),
250            },
251            embedding_dims: 640,
252            relay_url: DEFAULT_RELAY_URL.into(),
253            is_test: false,
254        }
255    }
256}
257
impl TotalReclawMemory {
259    /// Create a new TotalReclaw memory backend.
260    ///
261    /// This initializes all crypto keys, derives the EOA and Smart Account,
262    /// sets up the LSH hasher, the embedding provider, and registers with the relay.
263    pub async fn new(config: TotalReclawConfig) -> Result<Self> {
264        // Derive keys from mnemonic
265        let keys = crypto::derive_keys_from_mnemonic(&config.mnemonic)?;
266        let lsh_seed = crypto::derive_lsh_seed(&config.mnemonic, &keys.salt)?;
267        let lsh_hasher = LshHasher::new(&lsh_seed, config.embedding_dims)?;
268
269        // Create embedding provider
270        let embedding_provider =
271            embedding::create_provider(config.embedding_mode, config.embedding_dims)?;
272
273        // Derive EOA + private key natively (BIP-44)
274        let eth_wallet = wallet::derive_eoa(&config.mnemonic)?;
275        let private_key = eth_wallet.private_key;
276
277        // Resolve Smart Account address via CREATE2 factory
278        let wallet_address =
279            wallet::resolve_smart_account_address(&eth_wallet.address, "https://sepolia.base.org")
280                .await?;
281
282        // Compute auth key hash and hex
283        let auth_key_hex = hex::encode(keys.auth_key);
284        let auth_key_hash = crypto::compute_auth_key_hash(&keys.auth_key);
285        let salt_hex = hex::encode(keys.salt);
286
287        // Auto-detect Pro tier from billing cache for chain routing
288        // Free tier = Base Sepolia (84532), Pro tier = Gnosis mainnet (100)
289        let chain_id = if let Some(cache) = billing::read_cache() {
290            if cache.is_pro() { 100 } else { 84532 }
291        } else {
292            84532
293        };
294
295        // Create relay client with wallet address
296        let relay_config = RelayConfig {
297            relay_url: config.relay_url.clone(),
298            auth_key_hex: auth_key_hex.clone(),
299            wallet_address: wallet_address.clone(),
300            is_test: config.is_test,
301            chain_id,
302        };
303        let mut relay = RelayClient::new(relay_config);
304
305        // Register with relay (idempotent)
306        let _user_id = relay
307            .register(&auth_key_hash, &salt_hex)
308            .await
309            .ok(); // Non-fatal if registration fails (may already be registered)
310
311        // Re-check billing to potentially update chain_id for Pro users
312        if let Ok(status) = relay.billing_status().await {
313            if status.tier.as_deref() == Some("pro") {
314                relay.set_chain_id(100);
315            }
316        }
317
318        Ok(Self {
319            keys,
320            lsh_hasher,
321            embedding_provider,
322            relay,
323            private_key,
324            hot_cache: std::sync::Mutex::new(HotCache::new()),
325        })
326    }
327
328    /// Get the wallet address.
329    pub fn wallet_address(&self) -> &str {
330        self.relay.wallet_address()
331    }
332
333    /// Get a reference to the relay client.
334    pub fn relay(&self) -> &RelayClient {
335        &self.relay
336    }
337
338    /// Get a reference to the derived keys.
339    pub fn keys(&self) -> &DerivedKeys {
340        &self.keys
341    }
342
343    /// Get a reference to the private key.
344    pub fn private_key(&self) -> &[u8; 32] {
345        &self.private_key
346    }
347
    // -----------------------------------------------------------------------
    // Memory trait methods
    // -----------------------------------------------------------------------

352    /// Backend name.
353    pub fn name(&self) -> &str {
354        "totalreclaw"
355    }
356
357    /// Store a memory entry using native UserOp.
358    ///
359    /// Importance defaults to 10 (max) if not specified. Per the client-consistency
360    /// spec, `decayScore = importance / 10`, so importance=10 -> decayScore=1.0.
361    ///
362    /// Clears the hot cache after storing to avoid stale results.
363    pub async fn store(
364        &self,
365        _key: &str,
366        content: &str,
367        category: MemoryCategory,
368        _session_id: Option<&str>,
369    ) -> Result<()> {
370        self.store_with_importance(_key, content, category, _session_id, 10.0)
371            .await
372    }
373
374    /// Store a memory entry with explicit importance (1-10 scale).
375    ///
376    /// Per the client-consistency spec, `decayScore = importance / 10`.
377    pub async fn store_with_importance(
378        &self,
379        _key: &str,
380        content: &str,
381        category: MemoryCategory,
382        _session_id: Option<&str>,
383        importance: f64,
384    ) -> Result<()> {
385        let source = format!("zeroclaw_{}", category);
386        store::store_fact_with_importance(
387            content,
388            &source,
389            importance,
390            &self.keys,
391            &self.lsh_hasher,
392            self.embedding_provider.as_ref(),
393            &self.relay,
394            Some(&self.private_key),
395        )
396        .await?;
397
398        // Invalidate hot cache after store (new data available)
399        if let Ok(mut cache) = self.hot_cache.lock() {
400            cache.clear();
401        }
402        Ok(())
403    }
404
405    /// Store multiple memory entries as a single batched UserOp.
406    ///
407    /// Gas savings: ~64% vs individual submissions for batch of 5.
408    pub async fn store_batch(
409        &self,
410        facts: &[(&str, &str)], // (content, source) pairs
411    ) -> Result<Vec<String>> {
412        let result = store::store_fact_batch(
413            facts,
414            &self.keys,
415            &self.lsh_hasher,
416            self.embedding_provider.as_ref(),
417            &self.relay,
418            &self.private_key,
419        )
420        .await?;
421
422        // Invalidate hot cache after batch store
423        if let Ok(mut cache) = self.hot_cache.lock() {
424            cache.clear();
425        }
426        Ok(result)
427    }
428
429    /// Recall memories matching a query.
430    ///
431    /// Uses a hot cache to skip remote queries when a semantically similar
432    /// query (cosine >= 0.85) was recently answered.
433    pub async fn recall(
434        &self,
435        query: &str,
436        limit: usize,
437        _session_id: Option<&str>,
438    ) -> Result<Vec<MemoryEntry>> {
439        // 1. Generate query embedding first (needed for both cache check and search)
440        let query_embedding = self.embedding_provider.embed(query).await?;
441
442        // 2. Hot cache check: skip remote query if similar query was recently answered
443        if let Ok(cache) = self.hot_cache.lock() {
444            if let Some(cached_results) = cache.lookup(&query_embedding) {
445                return Ok(cached_results);
446            }
447        }
448
449        // 3. Generate query trapdoors (word hashes + stems)
450        let word_trapdoors = crate::blind::generate_blind_indices(query);
451
452        // 4. Generate LSH trapdoors from embedding
453        let embedding_f64: Vec<f64> = query_embedding.iter().map(|&f| f as f64).collect();
454        let lsh_trapdoors = self.lsh_hasher.hash(&embedding_f64)?;
455
456        // 5. Combine all trapdoors
457        let mut all_trapdoors = word_trapdoors;
458        all_trapdoors.extend(lsh_trapdoors.into_iter());
459
460        // 6. Dynamic candidate pool sizing from billing cache
461        let billing_cache = billing::read_cache();
462        let max_candidates = billing::get_max_candidate_pool(billing_cache.as_ref());
463
464        // 7. Search subgraph
465        let mut candidates = search::search_candidates(
466            &self.relay,
467            self.relay.wallet_address(),
468            &all_trapdoors,
469            max_candidates,
470        )
471        .await?;
472
473        // Always run broadened search and merge — ensures vocabulary mismatches
474        // (e.g., "preferences" vs "prefer") don't cause recall failures.
475        // The reranker handles scoring; extra cost is ~1 GraphQL query per recall.
476        let broadened = search::search_broadened(
477            &self.relay,
478            self.relay.wallet_address(),
479            max_candidates,
480        )
481        .await
482        .unwrap_or_default();
483
484        // Merge broadened results with existing candidates (deduplicate by ID)
485        let mut seen: std::collections::HashSet<String> =
486            candidates.iter().map(|c| c.id.clone()).collect();
487        for fact in broadened {
488            if !seen.contains(&fact.id) {
489                seen.insert(fact.id.clone());
490                candidates.push(fact);
491            }
492        }
493
494        // 8. Decrypt candidates and build reranker input (with v1 provenance)
495        let mut rerank_candidates = Vec::new();
496        for fact in &candidates {
497            // Decrypt content
498            let blob_b64 = match search::hex_blob_to_base64(&fact.encrypted_blob) {
499                Some(b) => b,
500                None => continue,
501            };
502            let decrypted = match crypto::decrypt(&blob_b64, &self.keys.encryption_key) {
503                Ok(t) => t,
504                Err(_) => continue,
505            };
506
507            // Parse the envelope to extract the display text + v1 source.
508            // v1 blobs carry `source` (Retrieval v2 Tier 1 weighting signal);
509            // v0 blobs don't — reranker falls back to the legacy claim weight.
510            let envelope = parse_decrypted_envelope(&decrypted);
511            let text = envelope.text;
512
513            // Decrypt embedding (if available)
514            let mut emb = fact
515                .encrypted_embedding
516                .as_deref()
517                .and_then(|e| crypto::decrypt(e, &self.keys.encryption_key).ok())
518                .and_then(|b64| {
519                    base64::engine::general_purpose::STANDARD
520                        .decode(&b64)
521                        .ok()
522                })
523                .map(|bytes| {
524                    bytes
525                        .chunks_exact(4)
526                        .map(|c| f32::from_le_bytes([c[0], c[1], c[2], c[3]]))
527                        .collect::<Vec<f32>>()
528                })
529                .unwrap_or_default();
530
531            // Re-embed if stored dimension differs from current model
532            let expected_dims = self.embedding_provider.dimensions();
533            if !emb.is_empty() && emb.len() != expected_dims {
534                match self.embedding_provider.embed(&text).await {
535                    Ok(fresh) => emb = fresh,
536                    Err(_) => emb = Vec::new(),
537                }
538            }
539
540            rerank_candidates.push(Candidate {
541                id: fact.id.clone(),
542                text: text.clone(),
543                embedding: emb,
544                timestamp: fact.timestamp.clone().unwrap_or_default(),
545                source: envelope.v1_source,
546            });
547        }
548
549        // 9. Rerank with Retrieval v2 Tier 1 source weights enabled.
550        // v1 blobs carry a MemorySource; v0 blobs fall through to the legacy
551        // claim weight so pre-2.0 vaults aren't penalized.
552        let ranked = reranker::rerank_with_config(
553            query,
554            &query_embedding,
555            &rerank_candidates,
556            limit,
557            RerankerConfig {
558                apply_source_weights: true,
559            },
560        )?;
561
562        // 10. Convert to MemoryEntry
563        let results: Vec<MemoryEntry> = ranked
564            .into_iter()
565            .map(|r| MemoryEntry {
566                id: r.id.clone(),
567                key: r.id,
568                content: r.text,
569                category: MemoryCategory::Core, // Category not stored in subgraph; default to Core
570                timestamp: r.timestamp,
571                session_id: None,
572                score: Some(r.score),
573            })
574            .collect();
575
576        // 11. Update hot cache
577        if let Ok(mut cache) = self.hot_cache.lock() {
578            cache.insert(query_embedding, results.clone());
579        }
580
581        Ok(results)
582    }
583
584    /// Auto-recall: search with the spec-mandated top_k=8.
585    ///
586    /// Per the client-consistency spec, auto-recall at session start
587    /// uses the raw user message as query and returns top 8 after reranking.
588    pub async fn auto_recall(&self, query: &str) -> Result<Vec<MemoryEntry>> {
589        self.recall(query, AUTO_RECALL_TOP_K, None).await
590    }
591
592    /// Get a specific memory entry by key/ID.
593    pub async fn get(&self, key: &str) -> Result<Option<MemoryEntry>> {
594        let results = self.recall(key, 1, None).await?;
595        Ok(results.into_iter().next())
596    }
597
598    /// List all memories (paginated export).
599    ///
600    /// Parses each decrypted envelope to extract the correct category
601    /// (previously hardcoded to Core).
602    pub async fn list(
603        &self,
604        _category: Option<&MemoryCategory>,
605        _session_id: Option<&str>,
606    ) -> Result<Vec<MemoryEntry>> {
607        let facts = search::fetch_all_facts(&self.relay, self.relay.wallet_address()).await?;
608
609        let mut entries = Vec::new();
610        for fact in facts {
611            let blob_b64 = match search::hex_blob_to_base64(&fact.encrypted_blob) {
612                Some(b) => b,
613                None => continue,
614            };
615            let decrypted = match crypto::decrypt(&blob_b64, &self.keys.encryption_key) {
616                Ok(t) => t,
617                Err(_) => continue,
618            };
619
620            let envelope = parse_decrypted_envelope(&decrypted);
621
622            entries.push(MemoryEntry {
623                id: fact.id.clone(),
624                key: fact.id,
625                content: envelope.text,
626                category: envelope.category,
627                timestamp: fact.timestamp.unwrap_or_default(),
628                session_id: None,
629                score: None,
630            });
631        }
632
633        Ok(entries)
634    }
635
636    /// Forget (soft-delete) a memory entry using native UserOp.
637    ///
638    /// Emits a Memory Taxonomy v1 tombstone (outer protobuf `version = 4`)
639    /// so the subgraph can distinguish v1 deletes from legacy v3 deletes.
640    pub async fn forget(&self, key: &str) -> Result<bool> {
641        store::store_tombstone_v1(key, &self.relay, Some(&self.private_key)).await?;
642        Ok(true)
643    }
644
    // -----------------------------------------------------------------------
    // Memory Taxonomy v1 extensions
    // -----------------------------------------------------------------------

649    /// Store a Memory Taxonomy v1 claim (explicit v1 write path).
650    ///
651    /// Use this when the caller has full v1 context (type, source, scope,
652    /// volatility, optional reasoning). Produces a v4 protobuf envelope
653    /// with the canonical `MemoryClaimV1` JSON inner blob.
654    ///
655    /// For the ZeroClaw `Memory` trait's simpler `(key, content, category)`
656    /// shape, `store_with_importance()` remains the primary entry point
657    /// and continues to emit v3 envelopes during the v0→v1 migration
658    /// window. Switch your agent to `store_v1()` when you want v1
659    /// provenance-weighted reranking at recall time.
660    pub async fn store_v1(&self, input: &store::V1StoreInput) -> Result<String> {
661        let fact_id = store::store_fact_v1(
662            input,
663            &self.keys,
664            &self.lsh_hasher,
665            self.embedding_provider.as_ref(),
666            &self.relay,
667            Some(&self.private_key),
668        )
669        .await?;
670
671        // Invalidate hot cache after v1 store
672        if let Ok(mut cache) = self.hot_cache.lock() {
673            cache.clear();
674        }
675        Ok(fact_id)
676    }
677
678    /// Pin a memory claim — supersede it with a new v1 blob whose
679    /// `volatility: stable` signals "never auto-supersede".
680    ///
681    /// ZeroClaw implements pin as a supersede operation: fetch the fact,
682    /// re-store it as a v1 claim with `volatility: stable`, then tombstone
683    /// the prior version. The new claim's `superseded_by` field tracks the
684    /// previous id so a future unpin can reverse the chain.
685    ///
686    /// Not-yet-implemented: this method returns an error advising the
687    /// caller to run the MCP server's `totalreclaw_pin` tool instead.
688    /// See ZeroClaw 2.0 Known Gaps in CLAUDE.md.
689    pub async fn pin(&self, _memory_id: &str) -> Result<()> {
690        Err(crate::Error::Crypto(
691            "pin: not yet implemented in ZeroClaw — use the MCP totalreclaw_pin tool \
692             from your agent. See CLAUDE.md Known Gaps."
693                .into(),
694        ))
695    }
696
697    /// Retype a memory claim — change its v1 `type` (claim → directive, etc.)
698    /// via supersede.
699    ///
700    /// Not-yet-implemented in ZeroClaw's native trait. The MCP server's
701    /// `totalreclaw_retype` tool is the canonical path.
702    pub async fn retype(
703        &self,
704        _memory_id: &str,
705        _new_type: MemorySource,
706    ) -> Result<()> {
707        Err(crate::Error::Crypto(
708            "retype: not yet implemented in ZeroClaw — use the MCP \
709             totalreclaw_retype tool. See CLAUDE.md Known Gaps."
710                .into(),
711        ))
712    }
713
714    /// Set or change the v1 `scope` of a memory claim via supersede.
715    ///
716    /// Not-yet-implemented in ZeroClaw's native trait. The MCP server's
717    /// `totalreclaw_set_scope` tool is the canonical path.
718    pub async fn set_scope(
719        &self,
720        _memory_id: &str,
721        _new_scope: totalreclaw_core::claims::MemoryScope,
722    ) -> Result<()> {
723        Err(crate::Error::Crypto(
724            "set_scope: not yet implemented in ZeroClaw — use the MCP \
725             totalreclaw_set_scope tool. See CLAUDE.md Known Gaps."
726                .into(),
727        ))
728    }
729
730    /// Count active memories.
731    pub async fn count(&self) -> Result<usize> {
732        search::count_facts(&self.relay, self.relay.wallet_address()).await
733    }
734
735    /// Health check.
736    pub async fn health_check(&self) -> bool {
737        self.relay.health_check().await.unwrap_or(false)
738    }
739
740    /// Billing status -- tier, usage, limits. Also updates the billing cache.
741    pub async fn status(&self) -> Result<crate::relay::BillingStatus> {
742        self.relay.billing_status().await
743    }
744
745    /// Fetch billing cache (from disk or relay, with 2h TTL).
746    ///
747    /// Returns a cached billing status with parsed feature flags.
748    pub async fn billing_cache(&self) -> Result<BillingCache> {
749        billing::fetch_billing_status(&self.relay).await
750    }
751
752    /// Check for quota warnings (>80% usage).
753    ///
754    /// Returns a human-readable warning message or None if usage is below 80%.
755    /// Call at session start (before_agent_start equivalent).
756    pub async fn quota_warning(&self) -> Option<String> {
757        let cache = billing::fetch_billing_status(&self.relay).await.ok()?;
758        cache.quota_warning_message()
759    }
760
761    /// Store debrief items from a session.
762    ///
763    /// ZeroClaw calls this from its consolidation phase or session-end handler.
764    /// The caller is responsible for running the LLM and passing parsed results.
765    ///
766    /// Each item is stored via the existing `store` pipeline with
767    /// `source: "zeroclaw_debrief"`.
768    pub async fn debrief(&self, items: &[crate::debrief::DebriefItem]) -> Result<usize> {
769        let mut stored = 0;
770        for item in items.iter().take(crate::debrief::MAX_DEBRIEF_ITEMS) {
771            let importance = item.importance as f64;
772            store::store_fact_with_importance(
773                &item.text,
774                crate::debrief::DEBRIEF_SOURCE,
775                importance,
776                &self.keys,
777                &self.lsh_hasher,
778                self.embedding_provider.as_ref(),
779                &self.relay,
780                Some(&self.private_key),
781            )
782            .await?;
783            stored += 1;
784        }
785
786        // Invalidate hot cache after debrief store
787        if let Ok(mut cache) = self.hot_cache.lock() {
788            cache.clear();
789        }
790        Ok(stored)
791    }
792
793    /// Export all memories as plaintext (decrypted).
794    pub async fn export(&self) -> Result<Vec<MemoryEntry>> {
795        self.list(None, None).await
796    }
797
798    /// Upgrade to Pro tier -- returns Stripe checkout URL.
799    pub async fn upgrade(&self) -> Result<String> {
800        self.relay.create_checkout().await
801    }
}