1use anyhow::{Context, Result};
47use hashtree_blossom::BlossomClient;
48use hashtree_core::{decode_tree_node, decrypt_chk, LinkType};
49use nostr_sdk::prelude::*;
50use serde::Deserialize;
51use std::collections::HashMap;
52use std::time::Duration;
53use tracing::{debug, info, warn};
54
// Nostr event kinds used by this module.
//
// 30078 is the addressable "application data" kind (NIP-78); this module
// publishes/queries the per-repo root pointer under it (see
// `build_repo_event_filter`).
pub const KIND_APP_DATA: u16 = 30078;

// Git-collaboration kinds: pull requests plus their status events
// (cf. the NIP-34 status kinds 1630-1633 — TODO confirm 1618 for PRs).
pub const KIND_PULL_REQUEST: u16 = 1618;
pub const KIND_STATUS_OPEN: u16 = 1630;
pub const KIND_STATUS_APPLIED: u16 = 1631;
pub const KIND_STATUS_CLOSED: u16 = 1632;
pub const KIND_STATUS_DRAFT: u16 = 1633;
pub const KIND_REPO_ANNOUNCEMENT: u16 = 30617;

// Values for the "l" label tag attached to repo events for discovery.
pub const LABEL_HASHTREE: &str = "hashtree";
pub const LABEL_GIT: &str = "git";
69
/// Lifecycle state of a pull request, derived from status events
/// (kinds 1630-1633; see `from_status_kind`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PullRequestState {
    Open,
    Applied,
    Closed,
    Draft,
}
78
79impl PullRequestState {
80 pub fn as_str(self) -> &'static str {
81 match self {
82 PullRequestState::Open => "open",
83 PullRequestState::Applied => "applied",
84 PullRequestState::Closed => "closed",
85 PullRequestState::Draft => "draft",
86 }
87 }
88
89 fn from_status_kind(status_kind: u16) -> Option<Self> {
90 match status_kind {
91 KIND_STATUS_OPEN => Some(PullRequestState::Open),
92 KIND_STATUS_APPLIED => Some(PullRequestState::Applied),
93 KIND_STATUS_CLOSED => Some(PullRequestState::Closed),
94 KIND_STATUS_DRAFT => Some(PullRequestState::Draft),
95 _ => None,
96 }
97 }
98
99 fn from_latest_status_kind(status_kind: Option<u16>) -> Self {
100 status_kind
101 .and_then(Self::from_status_kind)
102 .unwrap_or(PullRequestState::Open)
103 }
104}
105
/// User-selectable filter over pull-request states; `All` disables filtering.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum PullRequestStateFilter {
    /// Default: only open pull requests.
    #[default]
    Open,
    Applied,
    Closed,
    Draft,
    All,
}
116
117impl PullRequestStateFilter {
118 pub fn as_str(self) -> &'static str {
119 match self {
120 PullRequestStateFilter::Open => "open",
121 PullRequestStateFilter::Applied => "applied",
122 PullRequestStateFilter::Closed => "closed",
123 PullRequestStateFilter::Draft => "draft",
124 PullRequestStateFilter::All => "all",
125 }
126 }
127
128 fn includes(self, state: PullRequestState) -> bool {
129 match self {
130 PullRequestStateFilter::All => true,
131 PullRequestStateFilter::Open => state == PullRequestState::Open,
132 PullRequestStateFilter::Applied => state == PullRequestState::Applied,
133 PullRequestStateFilter::Closed => state == PullRequestState::Closed,
134 PullRequestStateFilter::Draft => state == PullRequestState::Draft,
135 }
136 }
137}
138
/// Summary row for one pull request, assembled from the PR event and its
/// latest trusted status.
#[derive(Debug, Clone)]
pub struct PullRequestListItem {
    // Hex id of the PR event.
    pub event_id: String,
    // Hex pubkey of the PR author.
    pub author_pubkey: String,
    pub state: PullRequestState,
    // Optional metadata — presumably taken from PR event tags; the
    // populating code is not in this view (TODO confirm tag sources).
    pub subject: Option<String>,
    pub commit_tip: Option<String>,
    pub branch: Option<String>,
    pub target_branch: Option<String>,
    // Event creation timestamp (unix seconds — confirm against producer).
    pub created_at: u64,
}
151
/// Result of a refs fetch: (ref name -> value, optional root hash,
/// optional 32-byte decryption key). See `fetch_refs_with_timeout`.
type FetchedRefs = (HashMap<String, String>, Option<String>, Option<[u8; 32]>);
153
/// An identity loaded from the keys or aliases file.
#[derive(Debug, Clone)]
pub struct StoredKey {
    // 64-char hex secret key; `None` for read-only alias entries.
    pub secret_hex: Option<String>,
    // 64-char hex x-only public key.
    pub pubkey_hex: String,
    // Optional human-friendly name used for lookups (e.g. "self").
    pub petname: Option<String>,
}
164
165impl StoredKey {
166 pub fn from_secret_hex(secret_hex: &str, petname: Option<String>) -> Result<Self> {
168 use secp256k1::{Secp256k1, SecretKey};
169
170 let sk_bytes = hex::decode(secret_hex).context("Invalid hex in secret key")?;
171 let sk = SecretKey::from_slice(&sk_bytes).context("Invalid secret key")?;
172 let secp = Secp256k1::new();
173 let pk = sk.x_only_public_key(&secp).0;
174 let pubkey_hex = hex::encode(pk.serialize());
175
176 Ok(Self {
177 secret_hex: Some(secret_hex.to_string()),
178 pubkey_hex,
179 petname,
180 })
181 }
182
183 pub fn from_nsec(nsec: &str, petname: Option<String>) -> Result<Self> {
185 let secret_key =
186 SecretKey::parse(nsec).map_err(|e| anyhow::anyhow!("Invalid nsec format: {}", e))?;
187 let secret_hex = hex::encode(secret_key.to_secret_bytes());
188 Self::from_secret_hex(&secret_hex, petname)
189 }
190
191 pub fn from_pubkey_hex(pubkey_hex: &str, petname: Option<String>) -> Result<Self> {
193 let pubkey = PublicKey::from_hex(pubkey_hex)
194 .map_err(|e| anyhow::anyhow!("Invalid pubkey hex: {}", e))?;
195
196 Ok(Self {
197 secret_hex: None,
198 pubkey_hex: hex::encode(pubkey.to_bytes()),
199 petname,
200 })
201 }
202
203 pub fn from_npub(npub: &str, petname: Option<String>) -> Result<Self> {
205 let pubkey =
206 PublicKey::parse(npub).map_err(|e| anyhow::anyhow!("Invalid npub format: {}", e))?;
207
208 Ok(Self {
209 secret_hex: None,
210 pubkey_hex: hex::encode(pubkey.to_bytes()),
211 petname,
212 })
213 }
214}
215
/// Which identity file an entry came from; controls accepted formats
/// (see `parse_identity_entry`).
#[derive(Clone, Copy)]
enum IdentityFileKind {
    // The keys file — may contain nsec / secret-hex entries.
    Keys,
    // The aliases file — public npub / pubkey-hex entries only.
    Aliases,
}
221
222fn ensure_aliases_file_hint() {
223 let aliases_path = hashtree_config::get_aliases_path();
224 if aliases_path.exists() {
225 return;
226 }
227
228 let Some(parent) = aliases_path.parent() else {
229 return;
230 };
231
232 if !parent.exists() {
233 return;
234 }
235
236 let template = concat!(
237 "# Public read-only aliases for repos you clone or fetch.\n",
238 "# Format: npub1... alias\n",
239 "# Example:\n",
240 "# npub1xndmdgymsf4a34rzr7346vp8qcptxf75pjqweh8naa8rklgxpfqqmfjtce sirius\n",
241 );
242
243 let _ = std::fs::OpenOptions::new()
244 .write(true)
245 .create_new(true)
246 .open(&aliases_path)
247 .and_then(|mut file| std::io::Write::write_all(&mut file, template.as_bytes()));
248}
249
250fn parse_identity_entry(
251 raw: &str,
252 petname: Option<String>,
253 kind: IdentityFileKind,
254) -> Option<StoredKey> {
255 let key = match kind {
256 IdentityFileKind::Keys => {
257 if raw.starts_with("nsec1") {
258 StoredKey::from_nsec(raw, petname)
259 } else if raw.starts_with("npub1") {
260 StoredKey::from_npub(raw, petname)
261 } else if raw.len() == 64 {
262 StoredKey::from_secret_hex(raw, petname)
263 } else {
264 return None;
265 }
266 }
267 IdentityFileKind::Aliases => {
268 if raw.starts_with("npub1") {
269 StoredKey::from_npub(raw, petname)
270 } else if raw.len() == 64 {
271 StoredKey::from_pubkey_hex(raw, petname)
272 } else {
273 return None;
274 }
275 }
276 };
277
278 key.ok()
279}
280
281fn load_identities_from_path(path: &std::path::Path, kind: IdentityFileKind) -> Vec<StoredKey> {
282 let mut keys = Vec::new();
283
284 if let Ok(content) = std::fs::read_to_string(path) {
285 for entry in hashtree_config::parse_keys_file(&content) {
286 if let Some(key) = parse_identity_entry(&entry.secret, entry.alias, kind) {
287 debug!(
288 "Loaded identity: pubkey={}, petname={:?}, has_secret={}",
289 key.pubkey_hex,
290 key.petname,
291 key.secret_hex.is_some()
292 );
293 keys.push(key);
294 }
295 }
296 }
297
298 keys
299}
300
301fn resolve_self_identity(keys: &[StoredKey]) -> Option<(String, Option<String>)> {
302 keys.iter()
303 .find(|k| k.petname.as_deref() == Some("self") && k.secret_hex.is_some())
304 .or_else(|| {
305 keys.iter()
306 .find(|k| k.petname.as_deref() == Some("default") && k.secret_hex.is_some())
307 })
308 .or_else(|| keys.iter().find(|k| k.secret_hex.is_some()))
309 .map(|key| (key.pubkey_hex.clone(), key.secret_hex.clone()))
310}
311
312pub fn load_keys() -> Vec<StoredKey> {
314 ensure_aliases_file_hint();
315
316 let mut keys =
317 load_identities_from_path(&hashtree_config::get_keys_path(), IdentityFileKind::Keys);
318 keys.extend(load_identities_from_path(
319 &hashtree_config::get_aliases_path(),
320 IdentityFileKind::Aliases,
321 ));
322
323 keys
324}
325
326pub fn resolve_identity(identifier: &str) -> Result<(String, Option<String>)> {
333 let keys = load_keys();
334
335 if identifier == "self" {
337 if let Some(resolved) = resolve_self_identity(&keys) {
338 return Ok(resolved);
339 }
340 let new_key = generate_and_save_key("self")?;
342 info!("Generated new identity: npub1{}", &new_key.pubkey_hex[..12]);
343 return Ok((new_key.pubkey_hex, new_key.secret_hex));
344 }
345
346 for key in &keys {
348 if key.petname.as_deref() == Some(identifier) {
349 return Ok((key.pubkey_hex.clone(), key.secret_hex.clone()));
350 }
351 }
352
353 if identifier.starts_with("npub1") {
355 let pk = PublicKey::parse(identifier)
356 .map_err(|e| anyhow::anyhow!("Invalid npub format: {}", e))?;
357 let pubkey_hex = hex::encode(pk.to_bytes());
358
359 let secret = keys
361 .iter()
362 .find(|k| k.pubkey_hex == pubkey_hex)
363 .and_then(|k| k.secret_hex.clone());
364
365 return Ok((pubkey_hex, secret));
366 }
367
368 if identifier.len() == 64 && hex::decode(identifier).is_ok() {
370 let secret = keys
371 .iter()
372 .find(|k| k.pubkey_hex == identifier)
373 .and_then(|k| k.secret_hex.clone());
374
375 return Ok((identifier.to_string(), secret));
376 }
377
378 anyhow::bail!(
380 "Unknown identity '{}'. Add it to ~/.hashtree/aliases (preferred) or ~/.hashtree/keys, or use a pubkey/npub.",
381 identifier
382 )
383}
384
385fn generate_and_save_key(petname: &str) -> Result<StoredKey> {
387 use std::fs::{self, OpenOptions};
388 use std::io::Write;
389
390 let keys = nostr_sdk::Keys::generate();
392 let secret_hex = hex::encode(keys.secret_key().to_secret_bytes());
393 let pubkey_hex = hex::encode(keys.public_key().to_bytes());
394
395 let keys_path = hashtree_config::get_keys_path();
397 if let Some(parent) = keys_path.parent() {
398 fs::create_dir_all(parent)?;
399 }
400 ensure_aliases_file_hint();
401
402 let mut file = OpenOptions::new()
404 .create(true)
405 .append(true)
406 .open(&keys_path)?;
407
408 let nsec = keys
410 .secret_key()
411 .to_bech32()
412 .map_err(|e| anyhow::anyhow!("Failed to encode nsec: {}", e))?;
413 writeln!(file, "{} {}", nsec, petname)?;
414
415 info!(
416 "Saved new key to {:?} with petname '{}'",
417 keys_path, petname
418 );
419
420 Ok(StoredKey {
421 secret_hex: Some(secret_hex),
422 pubkey_hex,
423 petname: Some(petname.to_string()),
424 })
425}
426
427use hashtree_config::Config;
428
429fn pick_latest_event<'a, I>(events: I) -> Option<&'a Event>
430where
431 I: IntoIterator<Item = &'a Event>,
432{
433 events
435 .into_iter()
436 .max_by_key(|event| (event.created_at, event.id))
437}
438
439fn is_matching_repo_event(event: &Event, repo_name: &str) -> bool {
440 let has_hashtree_label = event.tags.iter().any(|tag| {
441 let slice = tag.as_slice();
442 slice.len() >= 2 && slice[0].as_str() == "l" && slice[1].as_str() == LABEL_HASHTREE
443 });
444
445 if !has_hashtree_label {
446 return false;
447 }
448
449 event.tags.iter().any(|tag| {
450 let slice = tag.as_slice();
451 slice.len() >= 2 && slice[0].as_str() == "d" && slice[1].as_str() == repo_name
452 })
453}
454
455fn pick_latest_repo_event<'a, I>(events: I, repo_name: &str) -> Option<&'a Event>
456where
457 I: IntoIterator<Item = &'a Event>,
458{
459 pick_latest_event(
460 events
461 .into_iter()
462 .filter(|event| is_matching_repo_event(event, repo_name)),
463 )
464}
465
466fn build_repo_event_filter(author: PublicKey, repo_name: &str) -> Filter {
467 Filter::new()
468 .kind(Kind::Custom(KIND_APP_DATA))
469 .author(author)
470 .custom_tag(SingleLetterTag::lowercase(Alphabet::D), vec![repo_name])
471 .custom_tag(
472 SingleLetterTag::lowercase(Alphabet::L),
473 vec![LABEL_HASHTREE],
474 )
475 .limit(50)
476}
477
478fn next_replaceable_created_at(now: Timestamp, latest_existing: Option<Timestamp>) -> Timestamp {
479 match latest_existing {
480 Some(latest) if latest >= now => Timestamp::from_secs(latest.as_u64().saturating_add(1)),
481 _ => now,
482 }
483}
484
485async fn latest_repo_event_created_at(
486 client: &Client,
487 author: PublicKey,
488 repo_name: &str,
489 timeout: Duration,
490) -> Option<Timestamp> {
491 let events = client
492 .get_events_of(
493 vec![build_repo_event_filter(author, repo_name)],
494 EventSource::relays(Some(timeout)),
495 )
496 .await
497 .ok()?;
498 pick_latest_repo_event(events.iter(), repo_name).map(|event| event.created_at)
499}
500
501fn append_repo_discovery_labels(tags: &mut Vec<Tag>, repo_name: &str) {
502 tags.push(Tag::custom(
503 TagKind::custom("l"),
504 vec![LABEL_GIT.to_string()],
505 ));
506
507 let parts: Vec<&str> = repo_name.split('/').collect();
510 for i in 1..parts.len() {
511 let prefix = parts[..i].join("/");
512 tags.push(Tag::custom(TagKind::custom("l"), vec![prefix]));
513 }
514}
515
/// Extract the host portion of a relay URL (scheme and port stripped).
/// Handles bracketed IPv6 literals; returns `None` for an empty host.
fn relay_host(url: &str) -> Option<&str> {
    let without_scheme = ["ws://", "wss://", "http://", "https://"]
        .iter()
        .find_map(|scheme| url.strip_prefix(scheme))
        .unwrap_or(url);
    let authority = without_scheme.split('/').next().unwrap_or(without_scheme);
    if authority.is_empty() {
        return None;
    }

    match authority.strip_prefix('[') {
        // "[::1]:8080" -> "::1"
        Some(rest) => rest.split(']').next().filter(|value| !value.is_empty()),
        // "host:port" -> "host"
        None => authority
            .split(':')
            .next()
            .map(str::trim)
            .filter(|value| !value.is_empty()),
    }
}
538
539fn is_local_relay_url(url: &str) -> bool {
540 relay_host(url).is_some_and(|host| {
541 host.eq_ignore_ascii_case("localhost")
542 || host == "127.0.0.1"
543 || host == "::1"
544 || host.starts_with("127.")
545 })
546}
547
548fn has_non_local_relay(urls: &[String]) -> bool {
549 urls.iter().any(|url| !is_local_relay_url(url))
550}
551
552fn validate_repo_publish_relays(configured: &[String], connected: &[String]) -> Result<()> {
553 if connected.is_empty() {
554 anyhow::bail!(
555 "No relay confirmed repo publication. Another machine will not discover this repo via htree://<npub>/... Check [nostr].relays in ~/.hashtree/config.toml."
556 );
557 }
558
559 if has_non_local_relay(configured) && !has_non_local_relay(connected) {
560 anyhow::bail!(
561 "No public relay confirmed repo publication; local relays only: {}. Another machine will not discover this repo via htree://<npub>/... Check [nostr].relays in ~/.hashtree/config.toml.",
562 connected.join(", ")
563 );
564 }
565
566 Ok(())
567}
568
/// For each PR event, compute the latest *trusted* status kind.
///
/// Trust rules (enforced below): a status of kind `KIND_STATUS_APPLIED`
/// is trusted only when signed by the repo owner; any other status is
/// trusted when signed by the PR author or the repo owner. A trusted
/// "applied" status takes precedence over any newer non-applied status.
fn latest_trusted_pr_status_kinds(
    pr_events: &[Event],
    status_events: &[Event],
    repo_owner_pubkey: &str,
) -> HashMap<String, u16> {
    // PR event id (hex) -> PR author pubkey (hex).
    let pr_authors: HashMap<String, String> = pr_events
        .iter()
        .map(|event| (event.id.to_hex(), event.pubkey.to_hex()))
        .collect();

    // Group trusted status events by the PR they reference via "e" tags.
    let mut trusted_statuses: HashMap<String, Vec<&Event>> = HashMap::new();
    for status in status_events {
        let signer_pubkey = status.pubkey.to_hex();
        for tag in status.tags.iter() {
            let slice = tag.as_slice();
            if slice.len() < 2 || slice[0].as_str() != "e" {
                continue;
            }

            // Ignore statuses referencing PRs we don't know about.
            let pr_id = slice[1].to_string();
            let Some(pr_author_pubkey) = pr_authors.get(&pr_id) else {
                continue;
            };

            // Only the repo owner may mark a PR applied; authors may set
            // the other statuses on their own PRs.
            let trusted = if status.kind.as_u16() == KIND_STATUS_APPLIED {
                signer_pubkey == repo_owner_pubkey
            } else {
                signer_pubkey == *pr_author_pubkey || signer_pubkey == repo_owner_pubkey
            };
            if trusted {
                trusted_statuses.entry(pr_id).or_default().push(status);
            }
        }
    }

    // "applied" wins outright; otherwise the newest trusted status wins.
    let mut latest_status = HashMap::new();
    for (pr_id, events) in trusted_statuses {
        if let Some(applied) = pick_latest_event(
            events
                .iter()
                .copied()
                .filter(|event| event.kind.as_u16() == KIND_STATUS_APPLIED),
        ) {
            latest_status.insert(pr_id, applied.kind.as_u16());
        } else if let Some(latest) = pick_latest_event(events.iter().copied()) {
            latest_status.insert(pr_id, latest.kind.as_u16());
        }
    }

    latest_status
}
623
/// Outcome of publishing to the configured nostr relays.
#[derive(Debug, Clone)]
pub struct RelayResult {
    #[allow(dead_code)]
    // All relay URLs that were configured for the attempt.
    pub configured: Vec<String>,
    // Relays that confirmed — `validate_repo_publish_relays` treats these
    // as having confirmed the publication.
    pub connected: Vec<String>,
    // Relays that did not confirm.
    pub failed: Vec<String>,
}
635
/// Outcome of uploading blobs to the configured Blossom servers.
#[derive(Debug, Clone)]
pub struct BlossomResult {
    #[allow(dead_code)]
    // All Blossom server URLs that were configured for the attempt.
    pub configured: Vec<String>,
    // Servers where the upload succeeded.
    pub succeeded: Vec<String>,
    // Servers where the upload failed.
    pub failed: Vec<String>,
}
647
/// Client that resolves hashtree repos via nostr relays and Blossom blobs,
/// with a local-daemon fallback.
pub struct NostrClient {
    // Hex pubkey of the repo author being queried.
    pubkey: String,
    // Signing keys; `None` means read-only access.
    keys: Option<Keys>,
    // Relay URLs to query for repo events.
    relays: Vec<String>,
    // Content-addressed blob store client.
    blossom: BlossomClient,
    // Per-repo caches, filled on first successful fetch (see
    // `fetch_refs_with_timeout`).
    cached_refs: HashMap<String, HashMap<String, String>>,
    cached_root_hash: HashMap<String, String>,
    cached_encryption_key: HashMap<String, [u8; 32]>,
    // Secret from the URL fragment (#k=...) used to XOR-unmask an
    // "encryptedKey" tag.
    url_secret: Option<[u8; 32]>,
    // Whether the caller requested private (author-only, #private) access.
    is_private: bool,
    // Base URL of a local daemon used as a resolution fallback, if any.
    local_daemon_url: Option<String>,
}
669
/// Root-pointer data extracted from a repo event or a daemon response.
#[derive(Debug, Clone, Default)]
struct RootEventData {
    // Hex hash of the repo's hashtree root blob.
    root_hash: String,
    // 32-byte key from a "key"/"encryptedKey" tag (the latter still masked).
    encryption_key: Option<[u8; 32]>,
    // Which key tag was present: "key", "encryptedKey" or "selfEncryptedKey".
    key_tag_name: Option<String>,
    // Ciphertext from a "selfEncryptedKey" tag (author-only repos),
    // decrypted later via nip44.
    self_encrypted_ciphertext: Option<String>,
}
677
/// JSON payload returned by the local daemon's
/// `/api/nostr/resolve/{pubkey}/{repo}` endpoint.
#[derive(Debug, Deserialize)]
struct DaemonResolveResponse {
    // Root hash; absent or empty means the daemon could not resolve it.
    hash: Option<String>,
    // NOTE(review): the plaintext key deserializes from the wire name
    // "key_tag" — confirm that matches the daemon's JSON schema.
    #[serde(default, rename = "key_tag")]
    key: Option<String>,
    #[serde(default, rename = "encryptedKey")]
    encrypted_key: Option<String>,
    #[serde(default, rename = "selfEncryptedKey")]
    self_encrypted_key: Option<String>,
    // Where the daemon resolved from; used for logging only.
    #[serde(default)]
    source: Option<String>,
}
690
691impl NostrClient {
    /// Build a client for `pubkey`'s repos.
    ///
    /// `secret_key` (hex) enables signing and falls back to the
    /// `NOSTR_SECRET_KEY` env var; `url_secret` is the `#k=` fragment used
    /// to unmask link-visible keys; `is_private` marks `#private` access.
    pub fn new(
        pubkey: &str,
        secret_key: Option<String>,
        url_secret: Option<[u8; 32]>,
        is_private: bool,
        config: &Config,
    ) -> Result<Self> {
        // Install the rustls provider; result ignored since a second call
        // (already installed) is fine.
        let _ = rustls::crypto::ring::default_provider().install_default();

        let secret_key = secret_key.or_else(|| std::env::var("NOSTR_SECRET_KEY").ok());

        let keys = if let Some(ref secret_hex) = secret_key {
            let secret_bytes = hex::decode(secret_hex).context("Invalid secret key hex")?;
            let secret = nostr::SecretKey::from_slice(&secret_bytes)
                .map_err(|e| anyhow::anyhow!("Invalid secret key: {}", e))?;
            Some(Keys::new(secret))
        } else {
            None
        };

        // Blossom requires keys; generate throwaway ones for read-only use.
        let blossom_keys = keys.clone().unwrap_or_else(Keys::generate);
        let blossom = BlossomClient::new(blossom_keys).with_timeout(Duration::from_secs(30));

        tracing::info!(
            "BlossomClient created with read_servers: {:?}, write_servers: {:?}",
            blossom.read_servers(),
            blossom.write_servers()
        );

        let relays = hashtree_config::resolve_relays(
            &config.nostr.relays,
            Some(config.server.bind_address.as_str()),
        );
        // Prefer a detected local daemon; otherwise reuse a loopback
        // Blossom read server as the daemon base URL.
        let local_daemon_url =
            hashtree_config::detect_local_daemon_url(Some(config.server.bind_address.as_str()))
                .or_else(|| {
                    config
                        .blossom
                        .read_servers
                        .iter()
                        .find(|url| {
                            url.starts_with("http://127.0.0.1:")
                                || url.starts_with("http://localhost:")
                        })
                        .cloned()
                });

        Ok(Self {
            pubkey: pubkey.to_string(),
            keys,
            relays,
            blossom,
            cached_refs: HashMap::new(),
            cached_root_hash: HashMap::new(),
            cached_encryption_key: HashMap::new(),
            url_secret,
            is_private,
            local_daemon_url,
        })
    }
758
759 fn format_repo_author(pubkey_hex: &str) -> String {
760 PublicKey::from_hex(pubkey_hex)
761 .ok()
762 .and_then(|pk| pk.to_bech32().ok())
763 .unwrap_or_else(|| pubkey_hex.to_string())
764 }
765
    /// True when a secret key is available, i.e. this client can sign and
    /// publish events rather than only read.
    #[allow(dead_code)]
    pub fn can_sign(&self) -> bool {
        self.keys.is_some()
    }
771
772 pub fn fetch_refs(&mut self, repo_name: &str) -> Result<HashMap<String, String>> {
775 let (refs, _, _) = self.fetch_refs_with_timeout(repo_name, 10)?;
776 Ok(refs)
777 }
778
779 #[allow(dead_code)]
782 pub fn fetch_refs_quick(&mut self, repo_name: &str) -> Result<HashMap<String, String>> {
783 let (refs, _, _) = self.fetch_refs_with_timeout(repo_name, 3)?;
784 Ok(refs)
785 }
786
    /// Like `fetch_refs`, but also returns the root hash and the
    /// decryption key (when present).
    #[allow(dead_code)]
    pub fn fetch_refs_with_root(&mut self, repo_name: &str) -> Result<FetchedRefs> {
        self.fetch_refs_with_timeout(repo_name, 10)
    }
793
    /// Resolve refs for `repo_name`, caching results on this client.
    /// Returns `(refs, root_hash, encryption_key)`.
    ///
    /// Blocking: spins up a tokio runtime to drive the async fetch.
    fn fetch_refs_with_timeout(
        &mut self,
        repo_name: &str,
        timeout_secs: u64,
    ) -> Result<FetchedRefs> {
        debug!(
            "Fetching refs for {} from {} (timeout {}s)",
            repo_name, self.pubkey, timeout_secs
        );

        // Serve from cache when this repo was already resolved.
        if let Some(refs) = self.cached_refs.get(repo_name) {
            let root = self.cached_root_hash.get(repo_name).cloned();
            let key = self.cached_encryption_key.get(repo_name).cloned();
            return Ok((refs.clone(), root, key));
        }

        // NOTE(review): a new multi-thread runtime is built on every cache
        // miss; fine for a CLI, confirm this isn't on a hot path.
        let rt = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .context("Failed to create tokio runtime")?;

        let (refs, root_hash, encryption_key) =
            rt.block_on(self.fetch_refs_async_with_timeout(repo_name, timeout_secs))?;
        self.cached_refs.insert(repo_name.to_string(), refs.clone());
        if let Some(ref root) = root_hash {
            self.cached_root_hash
                .insert(repo_name.to_string(), root.clone());
        }
        if let Some(key) = encryption_key {
            self.cached_encryption_key
                .insert(repo_name.to_string(), key);
        }
        Ok((refs, root_hash, encryption_key))
    }
832
833 fn parse_root_event_data_from_event(event: &Event) -> RootEventData {
834 let root_hash = event
835 .tags
836 .iter()
837 .find(|t| t.as_slice().len() >= 2 && t.as_slice()[0].as_str() == "hash")
838 .map(|t| t.as_slice()[1].to_string())
839 .unwrap_or_else(|| event.content.to_string());
840
841 let (encryption_key, key_tag_name, self_encrypted_ciphertext) = event
842 .tags
843 .iter()
844 .find_map(|t| {
845 let slice = t.as_slice();
846 if slice.len() < 2 {
847 return None;
848 }
849 let tag_name = slice[0].as_str();
850 let tag_value = slice[1].to_string();
851 if tag_name == "selfEncryptedKey" {
852 return Some((None, Some(tag_name.to_string()), Some(tag_value)));
853 }
854 if tag_name == "key" || tag_name == "encryptedKey" {
855 if let Ok(bytes) = hex::decode(&tag_value) {
856 if bytes.len() == 32 {
857 let mut key = [0u8; 32];
858 key.copy_from_slice(&bytes);
859 return Some((Some(key), Some(tag_name.to_string()), None));
860 }
861 }
862 }
863 None
864 })
865 .unwrap_or((None, None, None));
866
867 RootEventData {
868 root_hash,
869 encryption_key,
870 key_tag_name,
871 self_encrypted_ciphertext,
872 }
873 }
874
875 fn parse_daemon_response_to_root_data(
876 response: DaemonResolveResponse,
877 ) -> Option<RootEventData> {
878 let root_hash = response.hash?;
879 if root_hash.is_empty() {
880 return None;
881 }
882
883 let mut data = RootEventData {
884 root_hash,
885 encryption_key: None,
886 key_tag_name: None,
887 self_encrypted_ciphertext: None,
888 };
889
890 if let Some(ciphertext) = response.self_encrypted_key {
891 data.key_tag_name = Some("selfEncryptedKey".to_string());
892 data.self_encrypted_ciphertext = Some(ciphertext);
893 return Some(data);
894 }
895
896 let (tag_name, tag_value) = if let Some(v) = response.encrypted_key {
897 ("encryptedKey", v)
898 } else if let Some(v) = response.key {
899 ("key", v)
900 } else {
901 return Some(data);
902 };
903
904 if let Ok(bytes) = hex::decode(&tag_value) {
905 if bytes.len() == 32 {
906 let mut key = [0u8; 32];
907 key.copy_from_slice(&bytes);
908 data.encryption_key = Some(key);
909 data.key_tag_name = Some(tag_name.to_string());
910 }
911 }
912
913 Some(data)
914 }
915
    /// Ask the local daemon (when one was detected) to resolve this repo's
    /// root. Best-effort: any HTTP, status, or parse failure yields `None`.
    async fn fetch_root_from_local_daemon(
        &self,
        repo_name: &str,
        timeout: Duration,
    ) -> Option<RootEventData> {
        let base = self.local_daemon_url.as_ref()?;
        let url = format!(
            "{}/api/nostr/resolve/{}/{}",
            base.trim_end_matches('/'),
            self.pubkey,
            repo_name
        );

        let client = reqwest::Client::builder().timeout(timeout).build().ok()?;
        let response = client.get(&url).send().await.ok()?;
        if !response.status().is_success() {
            return None;
        }

        let payload: DaemonResolveResponse = response.json().await.ok()?;
        // Capture the source label for logging before the payload is moved.
        let source = payload
            .source
            .clone()
            .unwrap_or_else(|| "unknown".to_string());
        let parsed = Self::parse_daemon_response_to_root_data(payload)?;
        debug!(
            "Resolved repo {} via local daemon source={}",
            repo_name, source
        );
        Some(parsed)
    }
947
    /// Resolve the repo root over the relays (with a local-daemon
    /// fallback), unmask/decrypt its key as needed, and read the refs
    /// from the hashtree. Returns `(refs, root_hash, key)`.
    async fn fetch_refs_async_with_timeout(
        &self,
        repo_name: &str,
        timeout_secs: u64,
    ) -> Result<(HashMap<String, String>, Option<String>, Option<[u8; 32]>)> {
        let client = Client::default();

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
            }
        }

        client.connect().await;

        // Budgets: short connect wait, query gets the rest (min 3s),
        // two attempts total with a small pause between them.
        let connect_timeout = Duration::from_secs(2);
        let query_timeout = Duration::from_secs(timeout_secs.saturating_sub(2).max(3));
        let local_daemon_timeout = Duration::from_secs(4);
        let retry_delay = Duration::from_millis(300);
        let max_attempts = 2;

        let start = std::time::Instant::now();

        let author = PublicKey::from_hex(&self.pubkey)
            .map_err(|e| anyhow::anyhow!("Invalid pubkey: {}", e))?;

        let filter = build_repo_event_filter(author, repo_name);

        debug!("Querying relays for repo {} events", repo_name);

        let mut root_data = None;
        for attempt in 1..=max_attempts {
            // Poll until at least one relay reports connected, or the
            // connect budget expires (then fall back to the local daemon).
            let connect_start = std::time::Instant::now();
            let mut last_log = std::time::Instant::now();
            let mut has_connected_relay = false;
            loop {
                let relays = client.relays().await;
                let total = relays.len();
                let mut connected = 0;
                for relay in relays.values() {
                    if relay.is_connected().await {
                        connected += 1;
                    }
                }
                if connected > 0 {
                    debug!(
                        "Connected to {}/{} relay(s) in {:?} (attempt {}/{})",
                        connected,
                        total,
                        start.elapsed(),
                        attempt,
                        max_attempts
                    );
                    has_connected_relay = true;
                    break;
                }
                if last_log.elapsed() > Duration::from_millis(500) {
                    debug!(
                        "Connecting to relays... (0/{} after {:?}, attempt {}/{})",
                        total,
                        start.elapsed(),
                        attempt,
                        max_attempts
                    );
                    last_log = std::time::Instant::now();
                }
                if connect_start.elapsed() > connect_timeout {
                    debug!(
                        "Timeout waiting for relay connections - continuing with local-daemon fallback"
                    );
                    break;
                }
                tokio::time::sleep(Duration::from_millis(50)).await;
            }

            // Only query when something is connected; a failed query is
            // treated the same as an empty result.
            let events = if has_connected_relay {
                match client
                    .get_events_of(
                        vec![filter.clone()],
                        EventSource::relays(Some(query_timeout)),
                    )
                    .await
                {
                    Ok(events) => events,
                    Err(e) => {
                        warn!("Failed to fetch events: {}", e);
                        vec![]
                    }
                }
            } else {
                vec![]
            };

            debug!(
                "Got {} events from relays on attempt {}/{}",
                events.len(),
                attempt,
                max_attempts
            );
            let relay_event = pick_latest_repo_event(events.iter(), repo_name);

            if let Some(event) = relay_event {
                // NOTE(review): byte-slicing `content` at 12 can panic on a
                // multi-byte UTF-8 boundary; content is expected to be a
                // hex hash here — confirm.
                debug!(
                    "Found relay event with root hash: {}",
                    &event.content[..12.min(event.content.len())]
                );
                root_data = Some(Self::parse_root_event_data_from_event(event));
                break;
            }

            // Relays had nothing: try the local daemon before retrying.
            if let Some(data) = self
                .fetch_root_from_local_daemon(repo_name, local_daemon_timeout)
                .await
            {
                root_data = Some(data);
                break;
            }

            if attempt < max_attempts {
                debug!(
                    "No hashtree event found for {} on attempt {}/{}; retrying",
                    repo_name, attempt, max_attempts
                );
                tokio::time::sleep(retry_delay).await;
            }
        }

        let _ = client.disconnect().await;

        let root_data = match root_data {
            Some(data) => data,
            None => {
                anyhow::bail!(
                    "Repository '{}' not found (no hashtree event published by {})",
                    repo_name,
                    Self::format_repo_author(&self.pubkey)
                );
            }
        };

        let root_hash = root_data.root_hash;

        // An empty hash is a published-but-empty repo: no refs.
        if root_hash.is_empty() {
            debug!("Empty root hash in event");
            return Ok((HashMap::new(), None, None));
        }

        let encryption_key = root_data.encryption_key;
        let key_tag_name = root_data.key_tag_name;
        let self_encrypted_ciphertext = root_data.self_encrypted_ciphertext;

        // Turn whatever key material the event carried into a usable key.
        let unmasked_key = match key_tag_name.as_deref() {
            // Link-visible repo: XOR-unmask with the #k= URL secret.
            Some("encryptedKey") => {
                if let (Some(masked), Some(secret)) = (encryption_key, self.url_secret) {
                    let mut unmasked = [0u8; 32];
                    for i in 0..32 {
                        unmasked[i] = masked[i] ^ secret[i];
                    }
                    Some(unmasked)
                } else {
                    anyhow::bail!(
                        "This repo is link-visible and requires a secret key.\n\
                         Use: htree://.../{repo_name}#k=<secret>\n\
                         Ask the repo owner for the full URL with the secret."
                    );
                }
            }
            // Author-only repo: nip44-decrypt the key with our own keys.
            Some("selfEncryptedKey") => {
                if !self.is_private {
                    anyhow::bail!(
                        "This repo is private (author-only).\n\
                         Use: htree://.../{repo_name}#private\n\
                         Only the author can access this repo."
                    );
                }

                if let Some(keys) = &self.keys {
                    if let Some(ciphertext) = self_encrypted_ciphertext {
                        let pubkey = keys.public_key();
                        match nip44::decrypt(keys.secret_key(), &pubkey, &ciphertext) {
                            Ok(key_hex) => {
                                let key_bytes =
                                    hex::decode(&key_hex).context("Invalid decrypted key hex")?;
                                if key_bytes.len() != 32 {
                                    anyhow::bail!("Decrypted key wrong length");
                                }
                                let mut key = [0u8; 32];
                                key.copy_from_slice(&key_bytes);
                                Some(key)
                            }
                            Err(e) => {
                                anyhow::bail!(
                                    "Failed to decrypt private repo: {}\n\
                                     The repo may be corrupted or published with a different key.",
                                    e
                                );
                            }
                        }
                    } else {
                        anyhow::bail!("selfEncryptedKey tag has invalid format");
                    }
                } else {
                    anyhow::bail!(
                        "Cannot access this private repo.\n\
                         Private repos can only be accessed by their author.\n\
                         You don't have the secret key for this repo's owner."
                    );
                }
            }
            // Plain key, or no key at all (public unencrypted repo).
            Some("key") | None => {
                encryption_key
            }
            Some(other) => {
                warn!("Unknown key tag type: {}", other);
                encryption_key
            }
        };

        info!(
            "Found root hash {} for {} (encrypted: {}, link_visible: {})",
            &root_hash[..12.min(root_hash.len())],
            repo_name,
            unmasked_key.is_some(),
            self.url_secret.is_some()
        );

        let refs = self
            .fetch_refs_from_hashtree(&root_hash, unmasked_key.as_ref())
            .await?;
        Ok((refs, Some(root_hash), unmasked_key))
    }
1196
1197 fn decrypt_and_decode(
1199 &self,
1200 data: &[u8],
1201 key: Option<&[u8; 32]>,
1202 ) -> Option<hashtree_core::TreeNode> {
1203 let decrypted_data: Vec<u8>;
1204 let data_to_decode = if let Some(k) = key {
1205 match decrypt_chk(data, k) {
1206 Ok(d) => {
1207 decrypted_data = d;
1208 &decrypted_data
1209 }
1210 Err(e) => {
1211 debug!("Decryption failed: {}", e);
1212 return None;
1213 }
1214 }
1215 } else {
1216 data
1217 };
1218
1219 match decode_tree_node(data_to_decode) {
1220 Ok(node) => Some(node),
1221 Err(e) => {
1222 debug!("Failed to decode tree node: {}", e);
1223 None
1224 }
1225 }
1226 }
1227
1228 async fn fetch_refs_from_hashtree(
1231 &self,
1232 root_hash: &str,
1233 encryption_key: Option<&[u8; 32]>,
1234 ) -> Result<HashMap<String, String>> {
1235 let mut refs = HashMap::new();
1236 debug!(
1237 "fetch_refs_from_hashtree: downloading root {}",
1238 &root_hash[..12]
1239 );
1240
1241 let root_data = match self.blossom.download(root_hash).await {
1243 Ok(data) => {
1244 debug!("Downloaded {} bytes from blossom", data.len());
1245 data
1246 }
1247 Err(e) => {
1248 anyhow::bail!(
1249 "Failed to download root hash {}: {}",
1250 &root_hash[..12.min(root_hash.len())],
1251 e
1252 );
1253 }
1254 };
1255
1256 let root_node = match self.decrypt_and_decode(&root_data, encryption_key) {
1258 Some(node) => {
1259 debug!("Decoded root node with {} links", node.links.len());
1260 node
1261 }
1262 None => {
1263 debug!(
1264 "Failed to decode root node (encryption_key: {})",
1265 encryption_key.is_some()
1266 );
1267 return Ok(refs);
1268 }
1269 };
1270
1271 debug!(
1273 "Root links: {:?}",
1274 root_node
1275 .links
1276 .iter()
1277 .map(|l| l.name.as_deref())
1278 .collect::<Vec<_>>()
1279 );
1280 let git_link = root_node
1281 .links
1282 .iter()
1283 .find(|l| l.name.as_deref() == Some(".git"));
1284 let (git_hash, git_key) = match git_link {
1285 Some(link) => {
1286 debug!("Found .git link with key: {}", link.key.is_some());
1287 (hex::encode(link.hash), link.key)
1288 }
1289 None => {
1290 debug!("No .git directory in hashtree root");
1291 return Ok(refs);
1292 }
1293 };
1294
1295 let git_data = match self.blossom.download(&git_hash).await {
1297 Ok(data) => data,
1298 Err(e) => {
1299 anyhow::bail!(
1300 "Failed to download .git directory ({}): {}",
1301 &git_hash[..12],
1302 e
1303 );
1304 }
1305 };
1306
1307 let git_node = match self.decrypt_and_decode(&git_data, git_key.as_ref()) {
1308 Some(node) => {
1309 debug!(
1310 "Decoded .git node with {} links: {:?}",
1311 node.links.len(),
1312 node.links
1313 .iter()
1314 .map(|l| l.name.as_deref())
1315 .collect::<Vec<_>>()
1316 );
1317 node
1318 }
1319 None => {
1320 debug!("Failed to decode .git node (key: {})", git_key.is_some());
1321 return Ok(refs);
1322 }
1323 };
1324
1325 let refs_link = git_node
1327 .links
1328 .iter()
1329 .find(|l| l.name.as_deref() == Some("refs"));
1330 let (refs_hash, refs_key) = match refs_link {
1331 Some(link) => (hex::encode(link.hash), link.key),
1332 None => {
1333 debug!("No refs directory in .git");
1334 return Ok(refs);
1335 }
1336 };
1337
1338 let refs_data = match self.blossom.try_download(&refs_hash).await {
1340 Some(data) => data,
1341 None => {
1342 debug!("Could not download refs directory");
1343 return Ok(refs);
1344 }
1345 };
1346
1347 let refs_node = match self.decrypt_and_decode(&refs_data, refs_key.as_ref()) {
1348 Some(node) => node,
1349 None => {
1350 return Ok(refs);
1351 }
1352 };
1353
1354 if let Some(head_link) = git_node
1356 .links
1357 .iter()
1358 .find(|l| l.name.as_deref() == Some("HEAD"))
1359 {
1360 let head_hash = hex::encode(head_link.hash);
1361 if let Some(head_data) = self.blossom.try_download(&head_hash).await {
1362 let head_content = if let Some(k) = head_link.key.as_ref() {
1364 match decrypt_chk(&head_data, k) {
1365 Ok(d) => String::from_utf8_lossy(&d).trim().to_string(),
1366 Err(_) => String::from_utf8_lossy(&head_data).trim().to_string(),
1367 }
1368 } else {
1369 String::from_utf8_lossy(&head_data).trim().to_string()
1370 };
1371 refs.insert("HEAD".to_string(), head_content);
1372 }
1373 }
1374
1375 for subdir_link in &refs_node.links {
1377 if subdir_link.link_type != LinkType::Dir {
1378 continue;
1379 }
1380 let subdir_name = match &subdir_link.name {
1381 Some(n) => n.clone(),
1382 None => continue,
1383 };
1384 let subdir_hash = hex::encode(subdir_link.hash);
1385
1386 self.collect_refs_recursive(
1387 &subdir_hash,
1388 subdir_link.key.as_ref(),
1389 &format!("refs/{}", subdir_name),
1390 &mut refs,
1391 )
1392 .await;
1393 }
1394
1395 debug!("Found {} refs from hashtree", refs.len());
1396 Ok(refs)
1397 }
1398
1399 async fn collect_refs_recursive(
1401 &self,
1402 dir_hash: &str,
1403 dir_key: Option<&[u8; 32]>,
1404 prefix: &str,
1405 refs: &mut HashMap<String, String>,
1406 ) {
1407 let dir_data = match self.blossom.try_download(dir_hash).await {
1408 Some(data) => data,
1409 None => return,
1410 };
1411
1412 let dir_node = match self.decrypt_and_decode(&dir_data, dir_key) {
1413 Some(node) => node,
1414 None => return,
1415 };
1416
1417 for link in &dir_node.links {
1418 let name = match &link.name {
1419 Some(n) => n.clone(),
1420 None => continue,
1421 };
1422 let link_hash = hex::encode(link.hash);
1423 let ref_path = format!("{}/{}", prefix, name);
1424
1425 if link.link_type == LinkType::Dir {
1426 Box::pin(self.collect_refs_recursive(
1428 &link_hash,
1429 link.key.as_ref(),
1430 &ref_path,
1431 refs,
1432 ))
1433 .await;
1434 } else {
1435 if let Some(ref_data) = self.blossom.try_download(&link_hash).await {
1437 let sha = if let Some(k) = link.key.as_ref() {
1439 match decrypt_chk(&ref_data, k) {
1440 Ok(d) => String::from_utf8_lossy(&d).trim().to_string(),
1441 Err(_) => String::from_utf8_lossy(&ref_data).trim().to_string(),
1442 }
1443 } else {
1444 String::from_utf8_lossy(&ref_data).trim().to_string()
1445 };
1446 if !sha.is_empty() {
1447 debug!("Found ref {} -> {}", ref_path, sha);
1448 refs.insert(ref_path, sha);
1449 }
1450 }
1451 }
1452 }
1453 }
1454
1455 #[allow(dead_code)]
1457 pub fn update_ref(&mut self, repo_name: &str, ref_name: &str, sha: &str) -> Result<()> {
1458 info!("Updating ref {} -> {} for {}", ref_name, sha, repo_name);
1459
1460 let refs = self.cached_refs.entry(repo_name.to_string()).or_default();
1461 refs.insert(ref_name.to_string(), sha.to_string());
1462
1463 Ok(())
1464 }
1465
1466 pub fn delete_ref(&mut self, repo_name: &str, ref_name: &str) -> Result<()> {
1468 info!("Deleting ref {} for {}", ref_name, repo_name);
1469
1470 if let Some(refs) = self.cached_refs.get_mut(repo_name) {
1471 refs.remove(ref_name);
1472 }
1473
1474 Ok(())
1475 }
1476
    /// Last root hash cached for `repo_name`, if any.
    pub fn get_cached_root_hash(&self, repo_name: &str) -> Option<&String> {
        self.cached_root_hash.get(repo_name)
    }

    /// Cached CHK encryption key for `repo_name`, if the repo is encrypted.
    pub fn get_cached_encryption_key(&self, repo_name: &str) -> Option<&[u8; 32]> {
        self.cached_encryption_key.get(repo_name)
    }

    /// Borrow the underlying blossom (blob storage) client.
    pub fn blossom(&self) -> &BlossomClient {
        &self.blossom
    }

    /// Configured relay URLs, cloned for the caller.
    pub fn relay_urls(&self) -> Vec<String> {
        self.relays.clone()
    }

    /// This identity's public key as hex.
    #[allow(dead_code)]
    pub fn pubkey(&self) -> &str {
        &self.pubkey
    }

    /// This identity's public key in bech32 (npub) form; falls back to the
    /// raw hex string if it cannot be parsed or encoded.
    pub fn npub(&self) -> String {
        PublicKey::from_hex(&self.pubkey)
            .ok()
            .and_then(|pk| pk.to_bech32().ok())
            .unwrap_or_else(|| self.pubkey.clone())
    }
1510
1511 pub fn publish_repo(
1519 &self,
1520 repo_name: &str,
1521 root_hash: &str,
1522 encryption_key: Option<(&[u8; 32], bool, bool)>,
1523 ) -> Result<(String, RelayResult)> {
1524 let keys = self.keys.as_ref().context(format!(
1525 "Cannot push: no secret key for {}. You can only push to your own repos.",
1526 &self.pubkey[..16]
1527 ))?;
1528
1529 info!(
1530 "Publishing repo {} with root hash {} (encrypted: {})",
1531 repo_name,
1532 root_hash,
1533 encryption_key.is_some()
1534 );
1535
1536 let rt = tokio::runtime::Builder::new_multi_thread()
1538 .enable_all()
1539 .build()
1540 .context("Failed to create tokio runtime")?;
1541
1542 let result =
1543 rt.block_on(self.publish_repo_async(keys, repo_name, root_hash, encryption_key));
1544
1545 rt.shutdown_timeout(std::time::Duration::from_millis(500));
1548
1549 result
1550 }
1551
    /// Async body of `publish_repo`: connects to the configured relays, builds
    /// and signs a kind-30078 replaceable event describing the repo root,
    /// sends it, and reports which relays confirmed it.
    ///
    /// `encryption_key` carries `(key, is_link_visible, is_self_private)`:
    /// - self-private: the CHK key is NIP-44 encrypted to our own pubkey and
    ///   published under a "selfEncryptedKey" tag;
    /// - link-visible: the key is published as plaintext hex under
    ///   "encryptedKey" (readable by anyone holding the event);
    /// - otherwise: plaintext hex under "key".
    async fn publish_repo_async(
        &self,
        keys: &Keys,
        repo_name: &str,
        root_hash: &str,
        encryption_key: Option<(&[u8; 32], bool, bool)>,
    ) -> Result<(String, RelayResult)> {
        let client = Client::new(keys.clone());

        // Per-relay outcome tracking for the RelayResult returned to the caller.
        let configured: Vec<String> = self.relays.clone();
        let mut connected: Vec<String> = Vec::new();
        let mut failed: Vec<String> = Vec::new();

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
                failed.push(relay.clone());
            }
        }

        client.connect().await;

        // Poll until at least one relay reports connected, or give up after
        // 3s; on timeout we still attempt the send and let it fail naturally.
        let connect_timeout = Duration::from_secs(3);
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut any_connected = false;
            for (_url, relay) in relays.iter() {
                if relay.is_connected().await {
                    any_connected = true;
                    break;
                }
            }
            if any_connected {
                break;
            }
            if start.elapsed() > connect_timeout {
                break;
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        // Replaceable events are keyed by (kind, pubkey, d-tag); created_at
        // must be strictly newer than the latest existing repo event or relays
        // may keep the old one.
        let publish_created_at = next_replaceable_created_at(
            Timestamp::now(),
            latest_repo_event_created_at(
                &client,
                keys.public_key(),
                repo_name,
                Duration::from_secs(2),
            )
            .await,
        );

        let mut tags = vec![
            Tag::custom(TagKind::custom("d"), vec![repo_name.to_string()]),
            Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            Tag::custom(TagKind::custom("hash"), vec![root_hash.to_string()]),
        ];

        if let Some((key, is_link_visible, is_self_private)) = encryption_key {
            if is_self_private {
                // Encrypt the CHK key to our own pubkey so only this identity
                // can recover it from the public event.
                let pubkey = keys.public_key();
                let key_hex = hex::encode(key);
                let encrypted =
                    nip44::encrypt(keys.secret_key(), &pubkey, &key_hex, nip44::Version::V2)
                        .map_err(|e| anyhow::anyhow!("NIP-44 encryption failed: {}", e))?;
                tags.push(Tag::custom(
                    TagKind::custom("selfEncryptedKey"),
                    vec![encrypted],
                ));
            } else if is_link_visible {
                tags.push(Tag::custom(
                    TagKind::custom("encryptedKey"),
                    vec![hex::encode(key)],
                ));
            } else {
                tags.push(Tag::custom(TagKind::custom("key"), vec![hex::encode(key)]));
            }
        }

        append_repo_discovery_labels(&mut tags, repo_name);

        let event = EventBuilder::new(Kind::Custom(KIND_APP_DATA), root_hash, tags)
            .custom_created_at(publish_created_at)
            .to_event(keys)
            .map_err(|e| anyhow::anyhow!("Failed to sign event: {}", e))?;

        match client.send_event(event.clone()).await {
            Ok(output) => {
                for url in output.success.iter() {
                    let url_str = url.to_string();
                    if !connected.contains(&url_str) {
                        connected.push(url_str);
                    }
                }
                for (url, err) in output.failed.iter() {
                    // NOTE(review): only entries with an actual error message
                    // are counted as failed — presumably a None error means no
                    // definitive failure; confirm against nostr-sdk semantics.
                    if err.is_some() {
                        let url_str = url.to_string();
                        if !failed.contains(&url_str) && !connected.contains(&url_str) {
                            failed.push(url_str);
                        }
                    }
                }
                info!(
                    "Sent event {} to {} relays ({} failed)",
                    output.id(),
                    output.success.len(),
                    output.failed.len()
                );
            }
            Err(e) => {
                warn!("Failed to send event: {}", e);
                // Total send failure: mark every configured relay as failed.
                for relay in &self.relays {
                    if !failed.contains(relay) {
                        failed.push(relay.clone());
                    }
                }
            }
        };

        // Shareable URL for the published repo; fall back to truncated hex
        // pubkey if bech32 encoding somehow fails.
        let npub_url = keys
            .public_key()
            .to_bech32()
            .map(|npub| format!("htree://{}/{}", npub, repo_name))
            .unwrap_or_else(|_| format!("htree://{}/{}", &self.pubkey[..16], repo_name));

        let relay_validation = validate_repo_publish_relays(&configured, &connected);

        // Disconnect before propagating any validation error so sockets are
        // always cleaned up; the short sleep lets close frames flush.
        let _ = client.disconnect().await;
        tokio::time::sleep(Duration::from_millis(50)).await;

        relay_validation?;

        Ok((
            npub_url,
            RelayResult {
                configured,
                connected,
                failed,
            },
        ))
    }
1714
1715 pub fn fetch_prs(
1717 &self,
1718 repo_name: &str,
1719 state_filter: PullRequestStateFilter,
1720 ) -> Result<Vec<PullRequestListItem>> {
1721 let rt = tokio::runtime::Builder::new_multi_thread()
1722 .enable_all()
1723 .build()
1724 .context("Failed to create tokio runtime")?;
1725
1726 let result = rt.block_on(self.fetch_prs_async(repo_name, state_filter));
1727 rt.shutdown_timeout(Duration::from_millis(500));
1728 result
1729 }
1730
    /// Fetch pull-request events (kind 1618) addressed to `repo_name` from the
    /// configured relays, resolve each PR's latest status event, filter by
    /// `state_filter`, and return the list sorted newest-first.
    ///
    /// Errors if no relay connects within 2s or either relay query fails or
    /// times out (3s each).
    pub async fn fetch_prs_async(
        &self,
        repo_name: &str,
        state_filter: PullRequestStateFilter,
    ) -> Result<Vec<PullRequestListItem>> {
        let client = Client::default();

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
            }
        }
        client.connect().await;

        // Wait (max 2s) for at least one relay connection; bail out otherwise.
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut connected = false;
            for relay in relays.values() {
                if relay.is_connected().await {
                    connected = true;
                    break;
                }
            }
            if connected {
                break;
            }
            if start.elapsed() > Duration::from_secs(2) {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to connect to any relay while fetching PRs"
                ));
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        // PRs reference the repo via an "a" tag: "<kind>:<pubkey>:<d-tag>".
        let repo_address = format!("{}:{}:{}", KIND_REPO_ANNOUNCEMENT, self.pubkey, repo_name);
        let pr_filter = Filter::new()
            .kind(Kind::Custom(KIND_PULL_REQUEST))
            .custom_tag(SingleLetterTag::lowercase(Alphabet::A), vec![&repo_address]);

        let pr_events = match tokio::time::timeout(
            Duration::from_secs(3),
            client.get_events_of(vec![pr_filter], EventSource::relays(None)),
        )
        .await
        {
            Ok(Ok(events)) => events,
            Ok(Err(e)) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to fetch PR events from relays: {}",
                    e
                ));
            }
            Err(_) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!("Timed out fetching PR events from relays"));
            }
        };

        if pr_events.is_empty() {
            let _ = client.disconnect().await;
            return Ok(Vec::new());
        }

        let pr_ids: Vec<String> = pr_events.iter().map(|e| e.id.to_hex()).collect();

        // Status events (open/applied/closed/draft) reference a PR via "e".
        let status_filter = Filter::new()
            .kinds(vec![
                Kind::Custom(KIND_STATUS_OPEN),
                Kind::Custom(KIND_STATUS_APPLIED),
                Kind::Custom(KIND_STATUS_CLOSED),
                Kind::Custom(KIND_STATUS_DRAFT),
            ])
            .custom_tag(
                SingleLetterTag::lowercase(Alphabet::E),
                pr_ids.iter().map(|s| s.as_str()).collect::<Vec<_>>(),
            );

        let status_events = match tokio::time::timeout(
            Duration::from_secs(3),
            client.get_events_of(vec![status_filter], EventSource::relays(None)),
        )
        .await
        {
            Ok(Ok(events)) => events,
            Ok(Err(e)) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to fetch PR status events from relays: {}",
                    e
                ));
            }
            Err(_) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Timed out fetching PR status events from relays"
                ));
            }
        };

        let _ = client.disconnect().await;

        // Reduce statuses to one latest kind per PR. NOTE(review): the helper
        // name suggests only statuses from trusted authors (PR author / repo
        // owner) count — confirm in latest_trusted_pr_status_kinds.
        let latest_status =
            latest_trusted_pr_status_kinds(&pr_events, &status_events, &self.pubkey);

        let mut prs = Vec::new();
        for event in &pr_events {
            let pr_id = event.id.to_hex();
            // PRs with no (recognized) status default to Open.
            let state =
                PullRequestState::from_latest_status_kind(latest_status.get(&pr_id).copied());
            if !state_filter.includes(state) {
                continue;
            }

            let mut subject = None;
            let mut commit_tip = None;
            let mut branch = None;
            let mut target_branch = None;

            // Extract display metadata from the PR event's tags.
            for tag in event.tags.iter() {
                let slice = tag.as_slice();
                if slice.len() >= 2 {
                    match slice[0].as_str() {
                        "subject" => subject = Some(slice[1].to_string()),
                        "c" => commit_tip = Some(slice[1].to_string()),
                        "branch" => branch = Some(slice[1].to_string()),
                        "target-branch" => target_branch = Some(slice[1].to_string()),
                        _ => {}
                    }
                }
            }

            prs.push(PullRequestListItem {
                event_id: pr_id,
                author_pubkey: event.pubkey.to_hex(),
                state,
                subject,
                commit_tip,
                branch,
                target_branch,
                created_at: event.created_at.as_u64(),
            });
        }

        // Newest first; event id breaks created_at ties deterministically.
        prs.sort_by(|left, right| {
            right
                .created_at
                .cmp(&left.created_at)
                .then_with(|| right.event_id.cmp(&left.event_id))
        });

        debug!(
            "Found {} PRs for {} (filter: {:?})",
            prs.len(),
            repo_name,
            state_filter
        );
        Ok(prs)
    }
1898
1899 pub fn publish_pr_merged_status(
1901 &self,
1902 pr_event_id: &str,
1903 pr_author_pubkey: &str,
1904 ) -> Result<()> {
1905 let keys = self
1906 .keys
1907 .as_ref()
1908 .context("Cannot publish status: no secret key")?;
1909
1910 let rt = tokio::runtime::Builder::new_multi_thread()
1911 .enable_all()
1912 .build()
1913 .context("Failed to create tokio runtime")?;
1914
1915 let result =
1916 rt.block_on(self.publish_pr_merged_status_async(keys, pr_event_id, pr_author_pubkey));
1917 rt.shutdown_timeout(Duration::from_millis(500));
1918 result
1919 }
1920
    /// Async body of `publish_pr_merged_status`: connects to the relays and
    /// publishes a kind-1631 ("applied") status event that references the PR
    /// event ("e" tag) and its author ("p" tag).
    ///
    /// Errors unless at least one relay confirms the event.
    async fn publish_pr_merged_status_async(
        &self,
        keys: &Keys,
        pr_event_id: &str,
        pr_author_pubkey: &str,
    ) -> Result<()> {
        let client = Client::new(keys.clone());

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
            }
        }
        client.connect().await;

        // Wait (max 3s) for at least one relay connection; hard error otherwise.
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut connected = false;
            for relay in relays.values() {
                if relay.is_connected().await {
                    connected = true;
                    break;
                }
            }
            if connected {
                break;
            }
            if start.elapsed() > Duration::from_secs(3) {
                anyhow::bail!("Failed to connect to any relay for status publish");
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        let tags = vec![
            Tag::custom(TagKind::custom("e"), vec![pr_event_id.to_string()]),
            Tag::custom(TagKind::custom("p"), vec![pr_author_pubkey.to_string()]),
        ];

        let event = EventBuilder::new(Kind::Custom(KIND_STATUS_APPLIED), "", tags)
            .to_event(keys)
            .map_err(|e| anyhow::anyhow!("Failed to sign status event: {}", e))?;

        // Capture the result instead of returning early so the disconnect
        // below always runs.
        let publish_result = match client.send_event(event).await {
            Ok(output) => {
                if output.success.is_empty() {
                    Err(anyhow::anyhow!(
                        "PR merged status was not confirmed by any relay"
                    ))
                } else {
                    info!(
                        "Published PR merged status to {} relays",
                        output.success.len()
                    );
                    Ok(())
                }
            }
            Err(e) => Err(anyhow::anyhow!("Failed to publish PR merged status: {}", e)),
        };

        let _ = client.disconnect().await;
        // Short sleep lets websocket close frames flush before runtime teardown.
        tokio::time::sleep(Duration::from_millis(50)).await;
        publish_result
    }
1986
1987 #[allow(dead_code)]
1989 pub async fn upload_blob(&self, _hash: &str, data: &[u8]) -> Result<String> {
1990 let hash = self
1991 .blossom
1992 .upload(data)
1993 .await
1994 .map_err(|e| anyhow::anyhow!("Blossom upload failed: {}", e))?;
1995 Ok(hash)
1996 }
1997
1998 #[allow(dead_code)]
2000 pub async fn upload_blob_if_missing(&self, data: &[u8]) -> Result<(String, bool)> {
2001 self.blossom
2002 .upload_if_missing(data)
2003 .await
2004 .map_err(|e| anyhow::anyhow!("Blossom upload failed: {}", e))
2005 }
2006
2007 #[allow(dead_code)]
2009 pub async fn download_blob(&self, hash: &str) -> Result<Vec<u8>> {
2010 self.blossom
2011 .download(hash)
2012 .await
2013 .map_err(|e| anyhow::anyhow!("Blossom download failed: {}", e))
2014 }
2015
    /// Best-effort download: None instead of an error when the blob is missing.
    #[allow(dead_code)]
    pub async fn try_download_blob(&self, hash: &str) -> Option<Vec<u8>> {
        self.blossom.try_download(hash).await
    }
2021}
2022
2023#[cfg(test)]
2024mod tests {
2025 use super::*;
2026
    // Well-known hex pubkey used as a read-only identity across these tests.
    const TEST_PUBKEY: &str = "4523be58d395b1b196a9b8c82b038b6895cb02b683d0c253a955068dba1facd0";

    // Fresh default configuration for each test.
    fn test_config() -> Config {
        Config::default()
    }
2032
    // A client built without a secret cannot sign and keeps non-empty relays.
    #[test]
    fn test_new_client() {
        let config = test_config();
        let client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        assert!(!client.relays.is_empty());
        assert!(!client.can_sign());
    }

    // Supplying a secret key enables signing.
    #[test]
    fn test_new_client_with_secret() {
        let config = test_config();
        let secret = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
        let client =
            NostrClient::new(TEST_PUBKEY, Some(secret.to_string()), None, false, &config).unwrap();
        assert!(client.can_sign());
    }

    // The first configured blossom read server becomes the local daemon
    // fallback URL.
    #[test]
    fn test_new_client_uses_local_read_server_as_daemon_fallback() {
        let mut config = test_config();
        config.server.bind_address = "127.0.0.1:1".to_string();
        config.blossom.read_servers = vec!["http://127.0.0.1:19092".to_string()];

        let client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        assert_eq!(
            client.local_daemon_url.as_deref(),
            Some("http://127.0.0.1:19092")
        );
    }

    // A brand-new client has no cached refs for an unknown repo.
    #[test]
    fn test_fetch_refs_empty() {
        let config = test_config();
        let client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        let refs = client.cached_refs.get("new-repo");
        assert!(refs.is_none());
    }

    // Local-only publication is accepted when only local relays are configured...
    #[test]
    fn test_validate_repo_publish_relays_allows_local_only_when_only_local_relays_configured() {
        let configured = vec!["ws://127.0.0.1:8080/ws".to_string()];
        let connected = vec!["ws://127.0.0.1:8080/ws".to_string()];

        assert!(validate_repo_publish_relays(&configured, &connected).is_ok());
    }

    // ...but rejected when a public relay was configured and none confirmed.
    #[test]
    fn test_validate_repo_publish_relays_rejects_local_only_when_public_relays_configured() {
        let configured = vec![
            "ws://127.0.0.1:8080/ws".to_string(),
            "wss://relay.damus.io".to_string(),
        ];
        let connected = vec!["ws://127.0.0.1:8080/ws".to_string()];

        let err = validate_repo_publish_relays(&configured, &connected)
            .expect_err("should reject local-only publication");
        assert!(err.to_string().contains("No public relay confirmed"));
        assert!(err.to_string().contains("local relays only"));
    }

    // update_ref creates the per-repo map on demand and stores the ref.
    #[test]
    fn test_update_ref() {
        let config = test_config();
        let mut client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();

        client
            .update_ref("repo", "refs/heads/main", "abc123")
            .unwrap();

        let refs = client.cached_refs.get("repo").unwrap();
        assert_eq!(refs.get("refs/heads/main"), Some(&"abc123".to_string()));
    }
2106
    // pick_latest_event favors the event with the greater created_at.
    #[test]
    fn test_pick_latest_event_prefers_newer_timestamp() {
        let keys = Keys::generate();
        let older = Timestamp::from_secs(1_700_000_000);
        let newer = Timestamp::from_secs(1_700_000_001);

        let event_old = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "old", [])
            .custom_created_at(older)
            .to_event(&keys)
            .unwrap();
        let event_new = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "new", [])
            .custom_created_at(newer)
            .to_event(&keys)
            .unwrap();

        let picked = pick_latest_event([&event_old, &event_new]).unwrap();
        assert_eq!(picked.id, event_new.id);
    }

    // Equal timestamps: the larger event id wins (deterministic tie-break).
    #[test]
    fn test_pick_latest_event_breaks_ties_with_event_id() {
        let keys = Keys::generate();
        let created_at = Timestamp::from_secs(1_700_000_000);

        let event_a = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "a", [])
            .custom_created_at(created_at)
            .to_event(&keys)
            .unwrap();
        let event_b = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "b", [])
            .custom_created_at(created_at)
            .to_event(&keys)
            .unwrap();

        let expected_id = if event_a.id > event_b.id {
            event_a.id
        } else {
            event_b.id
        };
        let picked = pick_latest_event([&event_a, &event_b]).unwrap();
        assert_eq!(picked.id, expected_id);
    }

    // An older existing event does not delay a new publish timestamp.
    #[test]
    fn test_next_replaceable_created_at_uses_now_when_existing_is_older() {
        let now = Timestamp::from_secs(1_700_000_010);
        let existing = Timestamp::from_secs(1_700_000_009);

        assert_eq!(
            next_replaceable_created_at(now, Some(existing)),
            now,
            "older repo events should not delay a new publish"
        );
    }

    // Same-second collisions are bumped by one second so the replacement is
    // strictly newer.
    #[test]
    fn test_next_replaceable_created_at_bumps_same_second_events() {
        let now = Timestamp::from_secs(1_700_000_010);
        let existing = Timestamp::from_secs(1_700_000_010);

        assert_eq!(
            next_replaceable_created_at(now, Some(existing)),
            Timestamp::from_secs(1_700_000_011),
            "same-second repo publishes need a strictly newer timestamp"
        );
    }

    // Selection is keyed by the d-tag: a newer event for a different repo
    // ("iris-chat-flutter") must not shadow "iris-chat".
    #[test]
    fn test_pick_latest_repo_event_ignores_newer_different_d_tag() {
        let keys = Keys::generate();
        let older = Timestamp::from_secs(1_700_000_000);
        let newer = Timestamp::from_secs(1_700_000_031);

        let iris_chat = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "good",
            [
                Tag::custom(TagKind::custom("d"), vec!["iris-chat".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            ],
        )
        .custom_created_at(older)
        .to_event(&keys)
        .unwrap();

        let iris_chat_flutter = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "bad",
            [
                Tag::custom(TagKind::custom("d"), vec!["iris-chat-flutter".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            ],
        )
        .custom_created_at(newer)
        .to_event(&keys)
        .unwrap();

        let picked = pick_latest_repo_event([&iris_chat, &iris_chat_flutter], "iris-chat").unwrap();
        assert_eq!(picked.id, iris_chat.id);
    }

    // Discovery labels include the git label and each path prefix of the name.
    #[test]
    fn test_append_repo_discovery_labels_includes_git_label_and_prefixes() {
        let mut tags = vec![];
        append_repo_discovery_labels(&mut tags, "tools/hashtree");

        let values: Vec<String> = tags
            .iter()
            .filter_map(|tag| {
                let parts = tag.as_slice();
                if parts.first().map(|kind| kind.as_str()) != Some("l") {
                    return None;
                }
                parts.get(1).cloned()
            })
            .collect();

        assert!(values.iter().any(|value| value == LABEL_GIT));
        assert!(values.iter().any(|value| value == "tools"));
    }
2226
    // An encrypted_key in the daemon payload maps to the "encryptedKey" tag
    // and decodes to the raw 32-byte key.
    #[test]
    fn test_parse_daemon_response_to_root_data_encrypted_key() {
        let payload = DaemonResolveResponse {
            hash: Some("ab".repeat(32)),
            key: None,
            encrypted_key: Some("11".repeat(32)),
            self_encrypted_key: None,
            source: Some("webrtc".to_string()),
        };

        let parsed = NostrClient::parse_daemon_response_to_root_data(payload).unwrap();
        assert_eq!(parsed.root_hash, "ab".repeat(32));
        assert_eq!(parsed.key_tag_name.as_deref(), Some("encryptedKey"));
        assert!(parsed.self_encrypted_ciphertext.is_none());
        assert_eq!(parsed.encryption_key.unwrap(), [0x11; 32]);
    }

    // A self_encrypted_key stays ciphertext (no decoded key) and maps to
    // the "selfEncryptedKey" tag.
    #[test]
    fn test_parse_daemon_response_to_root_data_self_encrypted() {
        let payload = DaemonResolveResponse {
            hash: Some("cd".repeat(32)),
            key: None,
            encrypted_key: None,
            self_encrypted_key: Some("ciphertext".to_string()),
            source: Some("webrtc".to_string()),
        };

        let parsed = NostrClient::parse_daemon_response_to_root_data(payload).unwrap();
        assert_eq!(parsed.root_hash, "cd".repeat(32));
        assert_eq!(parsed.key_tag_name.as_deref(), Some("selfEncryptedKey"));
        assert_eq!(
            parsed.self_encrypted_ciphertext.as_deref(),
            Some("ciphertext")
        );
        assert!(parsed.encryption_key.is_none());
    }

    // End-to-end against a stub HTTP daemon: the resolve endpoint's JSON is
    // parsed into root hash + key tag + decoded key.
    #[tokio::test]
    async fn test_fetch_root_from_local_daemon_parses_response() {
        use axum::{extract::Path, routing::get, Json, Router};
        use serde_json::json;

        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        let app = Router::new().route(
            "/api/nostr/resolve/:pubkey/:treename",
            get(
                |Path((pubkey, treename)): Path<(String, String)>| async move {
                    Json(json!({
                        "key": format!("{}/{}", pubkey, treename),
                        "hash": "ab".repeat(32),
                        "source": "webrtc",
                        "key_tag": "22".repeat(32),
                    }))
                },
            ),
        );

        let server = tokio::spawn(async move {
            let _ = axum::serve(listener, app).await;
        });

        let config = test_config();
        let mut client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        client.local_daemon_url = Some(format!("http://{}", addr));

        let resolved = client
            .fetch_root_from_local_daemon("repo", Duration::from_secs(2))
            .await
            .unwrap();
        assert_eq!(resolved.root_hash, "ab".repeat(32));
        assert_eq!(resolved.key_tag_name.as_deref(), Some("key"));
        assert_eq!(resolved.encryption_key, Some([0x22; 32]));

        server.abort();
    }
2303
    // A hex secret round-trips into a StoredKey with petname and 64-char pubkey.
    #[test]
    fn test_stored_key_from_hex() {
        let secret = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
        let key = StoredKey::from_secret_hex(secret, Some("test".to_string())).unwrap();
        assert_eq!(key.secret_hex.as_deref(), Some(secret));
        assert_eq!(key.petname, Some("test".to_string()));
        assert_eq!(key.pubkey_hex.len(), 64);
    }

    // An nsec decodes to a 64-char hex secret and pubkey.
    #[test]
    fn test_stored_key_from_nsec() {
        let nsec = "nsec1vl029mgpspedva04g90vltkh6fvh240zqtv9k0t9af8935ke9laqsnlfe5";
        let key = StoredKey::from_nsec(nsec, None).unwrap();
        assert_eq!(key.secret_hex.as_deref().map(str::len), Some(64));
        assert_eq!(key.pubkey_hex.len(), 64);
    }

    // An npub produces a read-only key (no secret).
    #[test]
    fn test_stored_key_from_npub_is_read_only() {
        let npub = "npub1xndmdgymsf4a34rzr7346vp8qcptxf75pjqweh8naa8rklgxpfqqmfjtce";
        let key = StoredKey::from_npub(npub, Some("sirius".to_string())).unwrap();

        assert!(key.secret_hex.is_none());
        assert_eq!(key.petname.as_deref(), Some("sirius"));
        assert_eq!(key.pubkey_hex.len(), 64);
    }

    // Self-identity resolution prefers a signing key over read-only aliases.
    #[test]
    fn test_resolve_self_identity_ignores_read_only_aliases() {
        let read_only = StoredKey::from_npub(
            "npub1xndmdgymsf4a34rzr7346vp8qcptxf75pjqweh8naa8rklgxpfqqmfjtce",
            Some("self".to_string()),
        )
        .unwrap();
        let signing = StoredKey::from_nsec(
            "nsec1vl029mgpspedva04g90vltkh6fvh240zqtv9k0t9af8935ke9laqsnlfe5",
            Some("work".to_string()),
        )
        .unwrap();

        let resolved = resolve_self_identity(&[read_only, signing.clone()]).unwrap();

        assert_eq!(resolved.0, signing.pubkey_hex);
        assert_eq!(resolved.1, signing.secret_hex);
    }

    // A raw hex pubkey resolves to itself with no secret.
    #[test]
    fn test_resolve_identity_hex_pubkey() {
        let result = resolve_identity(TEST_PUBKEY);
        assert!(result.is_ok());
        let (pubkey, secret) = result.unwrap();
        assert_eq!(pubkey, TEST_PUBKEY);
        assert!(secret.is_none());
    }

    // An npub resolves back to the same hex pubkey.
    #[test]
    fn test_resolve_identity_npub() {
        let pk_bytes = hex::decode(TEST_PUBKEY).unwrap();
        let pk = PublicKey::from_slice(&pk_bytes).unwrap();
        let npub = pk.to_bech32().unwrap();

        let result = resolve_identity(&npub);
        assert!(result.is_ok(), "Failed: {:?}", result.err());
        let (pubkey, _) = result.unwrap();
        assert_eq!(pubkey.len(), 64);
        assert_eq!(pubkey, TEST_PUBKEY);
    }

    // Repo authors are formatted as the full npub, never truncated with "...".
    #[test]
    fn test_format_repo_author_uses_full_npub() {
        let formatted = NostrClient::format_repo_author(TEST_PUBKEY);
        let expected = PublicKey::from_hex(TEST_PUBKEY)
            .unwrap()
            .to_bech32()
            .unwrap();

        assert_eq!(formatted, expected);
        assert!(!formatted.contains("..."));
    }

    // Unknown petnames are a hard error, not a silent fallback.
    #[test]
    fn test_resolve_identity_unknown_petname() {
        let result = resolve_identity("nonexistent_petname_xyz");
        assert!(result.is_err());
    }
2394
    // The self-encrypted CHK key must be real NIP-44 ciphertext: different
    // from the plaintext hex, not containing it, and round-trippable.
    #[test]
    fn test_private_key_is_nip44_encrypted_not_plaintext() {
        use nostr_sdk::prelude::{nip44, Keys};

        let keys = Keys::generate();
        let pubkey = keys.public_key();

        // Fixed 32-byte CHK key so the plaintext hex is deterministic.
        let chk_key: [u8; 32] = [
            0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab,
            0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67,
            0x89, 0xab, 0xcd, 0xef,
        ];
        let plaintext_hex = hex::encode(&chk_key);

        let encrypted = nip44::encrypt(
            keys.secret_key(),
            &pubkey,
            &plaintext_hex,
            nip44::Version::V2,
        )
        .expect("NIP-44 encryption should succeed");

        assert_ne!(
            encrypted, plaintext_hex,
            "NIP-44 encrypted value must differ from plaintext CHK hex"
        );

        assert!(
            !encrypted.contains(&plaintext_hex),
            "Encrypted value should not contain plaintext hex"
        );

        let decrypted = nip44::decrypt(keys.secret_key(), &pubkey, &encrypted)
            .expect("NIP-44 decryption should succeed");

        assert_eq!(
            decrypted, plaintext_hex,
            "Decrypted value should match original plaintext hex"
        );
    }

    // Public (plaintext hex) and private (NIP-44) key publications must be
    // distinguishable: different value, different length.
    #[test]
    fn test_encryption_modes_produce_different_values() {
        use nostr_sdk::prelude::{nip44, Keys};

        let keys = Keys::generate();
        let pubkey = keys.public_key();

        let chk_key: [u8; 32] = [0xaa; 32];
        let plaintext_hex = hex::encode(&chk_key);

        let public_value = plaintext_hex.clone();

        let private_value = nip44::encrypt(
            keys.secret_key(),
            &pubkey,
            &plaintext_hex,
            nip44::Version::V2,
        )
        .expect("NIP-44 encryption should succeed");

        assert_ne!(
            private_value, public_value,
            "Private (NIP-44) value must differ from public (plaintext) value"
        );

        assert!(
            private_value.len() != 64,
            "NIP-44 output should not be 64 chars like hex CHK"
        );
    }
2481
    // Build a minimal kind-1618 PR event with a "subject" tag, signed by `keys`.
    fn build_test_pr_event(keys: &Keys, created_at_secs: u64) -> Event {
        EventBuilder::new(
            Kind::Custom(KIND_PULL_REQUEST),
            "",
            [Tag::custom(
                TagKind::custom("subject"),
                vec!["test pr".to_string()],
            )],
        )
        .custom_created_at(Timestamp::from_secs(created_at_secs))
        .to_event(keys)
        .unwrap()
    }

    // Build a status event of `kind` referencing `pr_event_id` via an "e" tag.
    fn build_test_status_event(
        keys: &Keys,
        kind: u16,
        pr_event_id: &str,
        created_at_secs: u64,
    ) -> Event {
        EventBuilder::new(
            Kind::Custom(kind),
            "",
            [Tag::custom(
                TagKind::custom("e"),
                vec![pr_event_id.to_string()],
            )],
        )
        .custom_created_at(Timestamp::from_secs(created_at_secs))
        .to_event(keys)
        .unwrap()
    }
2514
2515 #[test]
2516 fn test_pull_request_state_from_latest_status_kind_defaults_to_open() {
2517 assert_eq!(
2518 PullRequestState::from_latest_status_kind(None),
2519 PullRequestState::Open
2520 );
2521 assert_eq!(
2522 PullRequestState::from_latest_status_kind(Some(KIND_STATUS_OPEN)),
2523 PullRequestState::Open
2524 );
2525 assert_eq!(
2526 PullRequestState::from_latest_status_kind(Some(9999)),
2527 PullRequestState::Open
2528 );
2529 }
2530
2531 #[test]
2532 fn test_pull_request_state_from_status_kind_maps_known_kinds() {
2533 assert_eq!(
2534 PullRequestState::from_status_kind(KIND_STATUS_APPLIED),
2535 Some(PullRequestState::Applied)
2536 );
2537 assert_eq!(
2538 PullRequestState::from_status_kind(KIND_STATUS_CLOSED),
2539 Some(PullRequestState::Closed)
2540 );
2541 assert_eq!(
2542 PullRequestState::from_status_kind(KIND_STATUS_DRAFT),
2543 Some(PullRequestState::Draft)
2544 );
2545 assert_eq!(PullRequestState::from_status_kind(9999), None);
2546 }
2547
2548 #[test]
2549 fn test_pull_request_state_filter_includes_only_requested_state() {
2550 assert!(PullRequestStateFilter::Open.includes(PullRequestState::Open));
2551 assert!(!PullRequestStateFilter::Open.includes(PullRequestState::Closed));
2552 assert!(PullRequestStateFilter::All.includes(PullRequestState::Open));
2553 assert!(PullRequestStateFilter::All.includes(PullRequestState::Applied));
2554 assert!(PullRequestStateFilter::All.includes(PullRequestState::Closed));
2555 assert!(PullRequestStateFilter::All.includes(PullRequestState::Draft));
2556 }
2557
2558 #[test]
2559 fn test_pull_request_state_strings_are_stable() {
2560 assert_eq!(PullRequestState::Open.as_str(), "open");
2561 assert_eq!(PullRequestState::Applied.as_str(), "applied");
2562 assert_eq!(PullRequestState::Closed.as_str(), "closed");
2563 assert_eq!(PullRequestState::Draft.as_str(), "draft");
2564
2565 assert_eq!(PullRequestStateFilter::Open.as_str(), "open");
2566 assert_eq!(PullRequestStateFilter::Applied.as_str(), "applied");
2567 assert_eq!(PullRequestStateFilter::Closed.as_str(), "closed");
2568 assert_eq!(PullRequestStateFilter::Draft.as_str(), "draft");
2569 assert_eq!(PullRequestStateFilter::All.as_str(), "all");
2570 }
2571
2572 #[test]
2573 fn test_latest_trusted_pr_status_kinds_ignores_untrusted_signers() {
2574 let repo_owner = Keys::generate();
2575 let pr_author = Keys::generate();
2576 let attacker = Keys::generate();
2577
2578 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2579 let spoofed_status = build_test_status_event(
2580 &attacker,
2581 KIND_STATUS_CLOSED,
2582 &pr_event.id.to_hex(),
2583 1_700_100_010,
2584 );
2585
2586 let statuses = latest_trusted_pr_status_kinds(
2587 &[pr_event.clone()],
2588 &[spoofed_status],
2589 &repo_owner.public_key().to_hex(),
2590 );
2591
2592 assert!(
2593 !statuses.contains_key(&pr_event.id.to_hex()),
2594 "untrusted status signer should be ignored"
2595 );
2596 }
2597
2598 #[test]
2599 fn test_latest_trusted_pr_status_kinds_accepts_pr_author() {
2600 let repo_owner = Keys::generate();
2601 let pr_author = Keys::generate();
2602
2603 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2604 let author_status = build_test_status_event(
2605 &pr_author,
2606 KIND_STATUS_CLOSED,
2607 &pr_event.id.to_hex(),
2608 1_700_100_010,
2609 );
2610
2611 let statuses = latest_trusted_pr_status_kinds(
2612 &[pr_event.clone()],
2613 &[author_status],
2614 &repo_owner.public_key().to_hex(),
2615 );
2616
2617 assert_eq!(
2618 statuses.get(&pr_event.id.to_hex()).copied(),
2619 Some(KIND_STATUS_CLOSED)
2620 );
2621 }
2622
2623 #[test]
2624 fn test_latest_trusted_pr_status_kinds_rejects_applied_from_pr_author() {
2625 let repo_owner = Keys::generate();
2626 let pr_author = Keys::generate();
2627
2628 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2629 let author_applied = build_test_status_event(
2630 &pr_author,
2631 KIND_STATUS_APPLIED,
2632 &pr_event.id.to_hex(),
2633 1_700_100_010,
2634 );
2635
2636 let statuses = latest_trusted_pr_status_kinds(
2637 &[pr_event.clone()],
2638 &[author_applied],
2639 &repo_owner.public_key().to_hex(),
2640 );
2641
2642 assert!(
2643 !statuses.contains_key(&pr_event.id.to_hex()),
2644 "PR author must not be able to self-mark applied"
2645 );
2646 }
2647
2648 #[test]
2649 fn test_latest_trusted_pr_status_kinds_accepts_repo_owner() {
2650 let repo_owner = Keys::generate();
2651 let pr_author = Keys::generate();
2652
2653 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2654 let owner_status = build_test_status_event(
2655 &repo_owner,
2656 KIND_STATUS_APPLIED,
2657 &pr_event.id.to_hex(),
2658 1_700_100_010,
2659 );
2660
2661 let statuses = latest_trusted_pr_status_kinds(
2662 &[pr_event.clone()],
2663 &[owner_status],
2664 &repo_owner.public_key().to_hex(),
2665 );
2666
2667 assert_eq!(
2668 statuses.get(&pr_event.id.to_hex()).copied(),
2669 Some(KIND_STATUS_APPLIED)
2670 );
2671 }
2672
2673 #[test]
2674 fn test_latest_trusted_pr_status_kinds_preserves_owner_applied_over_newer_author_status() {
2675 let repo_owner = Keys::generate();
2676 let pr_author = Keys::generate();
2677
2678 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2679 let owner_applied = build_test_status_event(
2680 &repo_owner,
2681 KIND_STATUS_APPLIED,
2682 &pr_event.id.to_hex(),
2683 1_700_100_010,
2684 );
2685 let newer_author_open = build_test_status_event(
2686 &pr_author,
2687 KIND_STATUS_OPEN,
2688 &pr_event.id.to_hex(),
2689 1_700_100_020,
2690 );
2691
2692 let statuses = latest_trusted_pr_status_kinds(
2693 &[pr_event.clone()],
2694 &[owner_applied, newer_author_open],
2695 &repo_owner.public_key().to_hex(),
2696 );
2697
2698 assert_eq!(
2699 statuses.get(&pr_event.id.to_hex()).copied(),
2700 Some(KIND_STATUS_APPLIED),
2701 "owner-applied status should remain authoritative even if author publishes a newer status"
2702 );
2703 }
2704
2705 #[test]
2706 fn test_latest_trusted_pr_status_kinds_ignores_newer_untrusted_status() {
2707 let repo_owner = Keys::generate();
2708 let pr_author = Keys::generate();
2709 let attacker = Keys::generate();
2710
2711 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2712 let trusted_open = build_test_status_event(
2713 &repo_owner,
2714 KIND_STATUS_OPEN,
2715 &pr_event.id.to_hex(),
2716 1_700_100_010,
2717 );
2718 let spoofed_closed = build_test_status_event(
2719 &attacker,
2720 KIND_STATUS_CLOSED,
2721 &pr_event.id.to_hex(),
2722 1_700_100_020,
2723 );
2724
2725 let statuses = latest_trusted_pr_status_kinds(
2726 &[pr_event.clone()],
2727 &[trusted_open, spoofed_closed],
2728 &repo_owner.public_key().to_hex(),
2729 );
2730
2731 assert_eq!(
2732 statuses.get(&pr_event.id.to_hex()).copied(),
2733 Some(KIND_STATUS_OPEN)
2734 );
2735 }
2736}