1use anyhow::{Context, Result};
47use hashtree_blossom::BlossomClient;
48use hashtree_core::{decode_tree_node, decrypt_chk, LinkType};
49use nostr_sdk::prelude::*;
50use serde::Deserialize;
51use std::collections::HashMap;
52use std::time::Duration;
53use tracing::{debug, info, warn};
54
/// NIP-78 "arbitrary custom app data" event kind; hashtree publishes repo
/// root announcements as these parameterized-replaceable events.
pub const KIND_APP_DATA: u16 = 30078;

/// Pull-request event kind (PR flow layered on the NIP-34 git events).
pub const KIND_PULL_REQUEST: u16 = 1618;
/// NIP-34 status kinds; attached to a PR via an "e" tag.
pub const KIND_STATUS_OPEN: u16 = 1630;
pub const KIND_STATUS_APPLIED: u16 = 1631;
pub const KIND_STATUS_CLOSED: u16 = 1632;
pub const KIND_STATUS_DRAFT: u16 = 1633;
/// NIP-34 repository announcement kind.
pub const KIND_REPO_ANNOUNCEMENT: u16 = 30617;

/// "l" label tag values used to discover hashtree-backed git repos.
pub const LABEL_HASHTREE: &str = "hashtree";
pub const LABEL_GIT: &str = "git";
69
/// Lifecycle state of a pull request, derived from status events
/// (kinds 1630-1633).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PullRequestState {
    Open,
    Applied,
    Closed,
    Draft,
}
78
79impl PullRequestState {
80 pub fn as_str(self) -> &'static str {
81 match self {
82 PullRequestState::Open => "open",
83 PullRequestState::Applied => "applied",
84 PullRequestState::Closed => "closed",
85 PullRequestState::Draft => "draft",
86 }
87 }
88
89 fn from_status_kind(status_kind: u16) -> Option<Self> {
90 match status_kind {
91 KIND_STATUS_OPEN => Some(PullRequestState::Open),
92 KIND_STATUS_APPLIED => Some(PullRequestState::Applied),
93 KIND_STATUS_CLOSED => Some(PullRequestState::Closed),
94 KIND_STATUS_DRAFT => Some(PullRequestState::Draft),
95 _ => None,
96 }
97 }
98
99 fn from_latest_status_kind(status_kind: Option<u16>) -> Self {
100 status_kind
101 .and_then(Self::from_status_kind)
102 .unwrap_or(PullRequestState::Open)
103 }
104}
105
/// User-facing filter over PR states; `All` disables filtering.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum PullRequestStateFilter {
    #[default]
    Open,
    Applied,
    Closed,
    Draft,
    All,
}
116
117impl PullRequestStateFilter {
118 pub fn as_str(self) -> &'static str {
119 match self {
120 PullRequestStateFilter::Open => "open",
121 PullRequestStateFilter::Applied => "applied",
122 PullRequestStateFilter::Closed => "closed",
123 PullRequestStateFilter::Draft => "draft",
124 PullRequestStateFilter::All => "all",
125 }
126 }
127
128 fn includes(self, state: PullRequestState) -> bool {
129 match self {
130 PullRequestStateFilter::All => true,
131 PullRequestStateFilter::Open => state == PullRequestState::Open,
132 PullRequestStateFilter::Applied => state == PullRequestState::Applied,
133 PullRequestStateFilter::Closed => state == PullRequestState::Closed,
134 PullRequestStateFilter::Draft => state == PullRequestState::Draft,
135 }
136 }
137}
138
/// Summary of one pull request for list-style output.
#[derive(Debug, Clone)]
pub struct PullRequestListItem {
    pub event_id: String,
    pub author_pubkey: String,
    pub state: PullRequestState,
    pub subject: Option<String>,
    pub commit_tip: Option<String>,
    pub branch: Option<String>,
    pub target_branch: Option<String>,
    pub created_at: u64,
}
151
/// (refs map, optional root hash, optional decrypted encryption key) — the
/// result shape shared by the fetch_refs_* family.
type FetchedRefs = (HashMap<String, String>, Option<String>, Option<[u8; 32]>);

/// Latest-seen announcement for one git repo (by repo name).
#[derive(Debug, Clone, PartialEq, Eq)]
struct GitRepoAnnouncement {
    repo_name: String,
    created_at: Timestamp,
    event_id: EventId,
}
160
/// One identity loaded from ~/.hashtree/keys or ~/.hashtree/aliases.
#[derive(Debug, Clone)]
pub struct StoredKey {
    // Present only for signing identities (loaded from the keys file).
    pub secret_hex: Option<String>,
    // x-only public key, hex-encoded.
    pub pubkey_hex: String,
    // Optional human-friendly name ("self", "default", or a custom alias).
    pub petname: Option<String>,
}
171
172impl StoredKey {
173 pub fn from_secret_hex(secret_hex: &str, petname: Option<String>) -> Result<Self> {
175 use secp256k1::{Secp256k1, SecretKey};
176
177 let sk_bytes = hex::decode(secret_hex).context("Invalid hex in secret key")?;
178 let sk = SecretKey::from_slice(&sk_bytes).context("Invalid secret key")?;
179 let secp = Secp256k1::new();
180 let pk = sk.x_only_public_key(&secp).0;
181 let pubkey_hex = hex::encode(pk.serialize());
182
183 Ok(Self {
184 secret_hex: Some(secret_hex.to_string()),
185 pubkey_hex,
186 petname,
187 })
188 }
189
190 pub fn from_nsec(nsec: &str, petname: Option<String>) -> Result<Self> {
192 let secret_key =
193 SecretKey::parse(nsec).map_err(|e| anyhow::anyhow!("Invalid nsec format: {}", e))?;
194 let secret_hex = hex::encode(secret_key.to_secret_bytes());
195 Self::from_secret_hex(&secret_hex, petname)
196 }
197
198 pub fn from_pubkey_hex(pubkey_hex: &str, petname: Option<String>) -> Result<Self> {
200 let pubkey = PublicKey::from_hex(pubkey_hex)
201 .map_err(|e| anyhow::anyhow!("Invalid pubkey hex: {}", e))?;
202
203 Ok(Self {
204 secret_hex: None,
205 pubkey_hex: hex::encode(pubkey.to_bytes()),
206 petname,
207 })
208 }
209
210 pub fn from_npub(npub: &str, petname: Option<String>) -> Result<Self> {
212 let pubkey =
213 PublicKey::parse(npub).map_err(|e| anyhow::anyhow!("Invalid npub format: {}", e))?;
214
215 Ok(Self {
216 secret_hex: None,
217 pubkey_hex: hex::encode(pubkey.to_bytes()),
218 petname,
219 })
220 }
221}
222
/// Which identity file an entry came from; determines accepted formats.
#[derive(Clone, Copy)]
enum IdentityFileKind {
    /// ~/.hashtree/keys: nsec / npub / 64-char secret hex.
    Keys,
    /// ~/.hashtree/aliases: npub / 64-char pubkey hex (read-only).
    Aliases,
}
228
229fn ensure_aliases_file_hint() {
230 let aliases_path = hashtree_config::get_aliases_path();
231 if aliases_path.exists() {
232 return;
233 }
234
235 let Some(parent) = aliases_path.parent() else {
236 return;
237 };
238
239 if !parent.exists() {
240 return;
241 }
242
243 let template = concat!(
244 "# Public read-only aliases for repos you clone or fetch.\n",
245 "# Format: npub1... alias\n",
246 "# Example:\n",
247 "# npub1xdhnr9mrv47kkrn95k6cwecearydeh8e895990n3acntwvmgk2dsdeeycm sirius\n",
248 );
249
250 let _ = std::fs::OpenOptions::new()
251 .write(true)
252 .create_new(true)
253 .open(&aliases_path)
254 .and_then(|mut file| std::io::Write::write_all(&mut file, template.as_bytes()));
255}
256
257fn parse_identity_entry(
258 raw: &str,
259 petname: Option<String>,
260 kind: IdentityFileKind,
261) -> Option<StoredKey> {
262 let key = match kind {
263 IdentityFileKind::Keys => {
264 if raw.starts_with("nsec1") {
265 StoredKey::from_nsec(raw, petname)
266 } else if raw.starts_with("npub1") {
267 StoredKey::from_npub(raw, petname)
268 } else if raw.len() == 64 {
269 StoredKey::from_secret_hex(raw, petname)
270 } else {
271 return None;
272 }
273 }
274 IdentityFileKind::Aliases => {
275 if raw.starts_with("npub1") {
276 StoredKey::from_npub(raw, petname)
277 } else if raw.len() == 64 {
278 StoredKey::from_pubkey_hex(raw, petname)
279 } else {
280 return None;
281 }
282 }
283 };
284
285 key.ok()
286}
287
288fn load_identities_from_path(path: &std::path::Path, kind: IdentityFileKind) -> Vec<StoredKey> {
289 let mut keys = Vec::new();
290
291 if let Ok(content) = std::fs::read_to_string(path) {
292 for entry in hashtree_config::parse_keys_file(&content) {
293 if let Some(key) = parse_identity_entry(&entry.secret, entry.alias, kind) {
294 debug!(
295 "Loaded identity: pubkey={}, petname={:?}, has_secret={}",
296 key.pubkey_hex,
297 key.petname,
298 key.secret_hex.is_some()
299 );
300 keys.push(key);
301 }
302 }
303 }
304
305 keys
306}
307
308fn resolve_self_identity(keys: &[StoredKey]) -> Option<(String, Option<String>)> {
309 keys.iter()
310 .find(|k| k.petname.as_deref() == Some("self") && k.secret_hex.is_some())
311 .or_else(|| {
312 keys.iter()
313 .find(|k| k.petname.as_deref() == Some("default") && k.secret_hex.is_some())
314 })
315 .or_else(|| keys.iter().find(|k| k.secret_hex.is_some()))
316 .map(|key| (key.pubkey_hex.clone(), key.secret_hex.clone()))
317}
318
319pub fn load_keys() -> Vec<StoredKey> {
321 ensure_aliases_file_hint();
322
323 let mut keys =
324 load_identities_from_path(&hashtree_config::get_keys_path(), IdentityFileKind::Keys);
325 keys.extend(load_identities_from_path(
326 &hashtree_config::get_aliases_path(),
327 IdentityFileKind::Aliases,
328 ));
329
330 keys
331}
332
333pub fn resolve_identity(identifier: &str) -> Result<(String, Option<String>)> {
340 let keys = load_keys();
341
342 if identifier == "self" {
344 if let Some(resolved) = resolve_self_identity(&keys) {
345 return Ok(resolved);
346 }
347 let new_key = generate_and_save_key("self")?;
349 info!("Generated new identity: npub1{}", &new_key.pubkey_hex[..12]);
350 return Ok((new_key.pubkey_hex, new_key.secret_hex));
351 }
352
353 for key in &keys {
355 if key.petname.as_deref() == Some(identifier) {
356 return Ok((key.pubkey_hex.clone(), key.secret_hex.clone()));
357 }
358 }
359
360 if identifier.starts_with("npub1") {
362 let pk = PublicKey::parse(identifier)
363 .map_err(|e| anyhow::anyhow!("Invalid npub format: {}", e))?;
364 let pubkey_hex = hex::encode(pk.to_bytes());
365
366 let secret = keys
368 .iter()
369 .find(|k| k.pubkey_hex == pubkey_hex)
370 .and_then(|k| k.secret_hex.clone());
371
372 return Ok((pubkey_hex, secret));
373 }
374
375 if identifier.len() == 64 && hex::decode(identifier).is_ok() {
377 let secret = keys
378 .iter()
379 .find(|k| k.pubkey_hex == identifier)
380 .and_then(|k| k.secret_hex.clone());
381
382 return Ok((identifier.to_string(), secret));
383 }
384
385 anyhow::bail!(
387 "Unknown identity '{}'. Add it to ~/.hashtree/aliases (preferred) or ~/.hashtree/keys, or use a pubkey/npub.",
388 identifier
389 )
390}
391
392fn generate_and_save_key(petname: &str) -> Result<StoredKey> {
394 use std::fs::{self, OpenOptions};
395 use std::io::Write;
396
397 let keys = nostr_sdk::Keys::generate();
399 let secret_hex = hex::encode(keys.secret_key().to_secret_bytes());
400 let pubkey_hex = hex::encode(keys.public_key().to_bytes());
401
402 let keys_path = hashtree_config::get_keys_path();
404 if let Some(parent) = keys_path.parent() {
405 fs::create_dir_all(parent)?;
406 }
407 ensure_aliases_file_hint();
408
409 let mut file = OpenOptions::new()
411 .create(true)
412 .append(true)
413 .open(&keys_path)?;
414
415 let nsec = keys
417 .secret_key()
418 .to_bech32()
419 .map_err(|e| anyhow::anyhow!("Failed to encode nsec: {}", e))?;
420 writeln!(file, "{} {}", nsec, petname)?;
421
422 info!(
423 "Saved new key to {:?} with petname '{}'",
424 keys_path, petname
425 );
426
427 Ok(StoredKey {
428 secret_hex: Some(secret_hex),
429 pubkey_hex,
430 petname: Some(petname.to_string()),
431 })
432}
433
434use hashtree_config::Config;
435
436fn pick_latest_event<'a, I>(events: I) -> Option<&'a Event>
437where
438 I: IntoIterator<Item = &'a Event>,
439{
440 events
442 .into_iter()
443 .max_by_key(|event| (event.created_at, event.id))
444}
445
446fn is_matching_repo_event(event: &Event, repo_name: &str) -> bool {
447 let has_hashtree_label = event.tags.iter().any(|tag| {
448 let slice = tag.as_slice();
449 slice.len() >= 2 && slice[0].as_str() == "l" && slice[1].as_str() == LABEL_HASHTREE
450 });
451
452 if !has_hashtree_label {
453 return false;
454 }
455
456 event.tags.iter().any(|tag| {
457 let slice = tag.as_slice();
458 slice.len() >= 2 && slice[0].as_str() == "d" && slice[1].as_str() == repo_name
459 })
460}
461
462fn pick_latest_repo_event<'a, I>(events: I, repo_name: &str) -> Option<&'a Event>
463where
464 I: IntoIterator<Item = &'a Event>,
465{
466 pick_latest_event(
467 events
468 .into_iter()
469 .filter(|event| is_matching_repo_event(event, repo_name)),
470 )
471}
472
473fn git_repo_name(event: &Event) -> Option<&str> {
474 let has_hashtree_label = event.tags.iter().any(|tag| {
475 let slice = tag.as_slice();
476 slice.len() >= 2 && slice[0].as_str() == "l" && slice[1].as_str() == LABEL_HASHTREE
477 });
478 let has_git_label = event.tags.iter().any(|tag| {
479 let slice = tag.as_slice();
480 slice.len() >= 2 && slice[0].as_str() == "l" && slice[1].as_str() == LABEL_GIT
481 });
482 if !has_hashtree_label || !has_git_label {
483 return None;
484 }
485
486 event.tags.iter().find_map(|tag| {
487 let slice = tag.as_slice();
488 if slice.len() < 2 || slice[0].as_str() != "d" {
489 return None;
490 }
491 let repo_name = slice[1].as_str();
492 if repo_name.is_empty() {
493 None
494 } else {
495 Some(repo_name)
496 }
497 })
498}
499
500fn list_git_repo_announcements(events: &[Event]) -> Vec<GitRepoAnnouncement> {
501 let mut latest_by_repo: HashMap<String, (Timestamp, EventId)> = HashMap::new();
502
503 for event in events {
504 let Some(repo_name) = git_repo_name(event) else {
505 continue;
506 };
507
508 let entry = latest_by_repo
509 .entry(repo_name.to_string())
510 .or_insert((event.created_at, event.id));
511 if (event.created_at, event.id) > (entry.0, entry.1) {
512 *entry = (event.created_at, event.id);
513 }
514 }
515
516 let mut repos: Vec<GitRepoAnnouncement> = latest_by_repo
517 .into_iter()
518 .map(|(repo_name, (created_at, event_id))| GitRepoAnnouncement {
519 repo_name,
520 created_at,
521 event_id,
522 })
523 .collect();
524 repos.sort_by(|left, right| left.repo_name.cmp(&right.repo_name));
525 repos
526}
527
528fn build_git_repo_list_filter(author: PublicKey) -> Filter {
529 Filter::new()
530 .kind(Kind::Custom(KIND_APP_DATA))
531 .author(author)
532 .custom_tag(SingleLetterTag::lowercase(Alphabet::L), vec![LABEL_GIT])
533 .limit(500)
534}
535
536fn build_repo_event_filter(author: PublicKey, repo_name: &str) -> Filter {
537 Filter::new()
538 .kind(Kind::Custom(KIND_APP_DATA))
539 .author(author)
540 .custom_tag(SingleLetterTag::lowercase(Alphabet::D), vec![repo_name])
541 .custom_tag(
542 SingleLetterTag::lowercase(Alphabet::L),
543 vec![LABEL_HASHTREE],
544 )
545 .limit(50)
546}
547
548fn next_replaceable_created_at(now: Timestamp, latest_existing: Option<Timestamp>) -> Timestamp {
549 match latest_existing {
550 Some(latest) if latest >= now => Timestamp::from_secs(latest.as_u64().saturating_add(1)),
551 _ => now,
552 }
553}
554
555async fn latest_repo_event_created_at(
556 client: &Client,
557 author: PublicKey,
558 repo_name: &str,
559 timeout: Duration,
560) -> Option<Timestamp> {
561 let events = client
562 .get_events_of(
563 vec![build_repo_event_filter(author, repo_name)],
564 EventSource::relays(Some(timeout)),
565 )
566 .await
567 .ok()?;
568 pick_latest_repo_event(events.iter(), repo_name).map(|event| event.created_at)
569}
570
571fn append_repo_discovery_labels(tags: &mut Vec<Tag>, repo_name: &str) {
572 tags.push(Tag::custom(
573 TagKind::custom("l"),
574 vec![LABEL_GIT.to_string()],
575 ));
576
577 let parts: Vec<&str> = repo_name.split('/').collect();
580 for i in 1..parts.len() {
581 let prefix = parts[..i].join("/");
582 tags.push(Tag::custom(TagKind::custom("l"), vec![prefix]));
583 }
584}
585
/// Extract the bare host from a relay URL, tolerating ws/wss/http/https
/// schemes, paths, ports, and bracketed IPv6 literals.
fn relay_host(url: &str) -> Option<&str> {
    // Drop a known scheme prefix, if any.
    let without_scheme = ["ws://", "wss://", "http://", "https://"]
        .iter()
        .find_map(|scheme| url.strip_prefix(scheme))
        .unwrap_or(url);

    // The authority is everything before the first '/'.
    let authority = match without_scheme.find('/') {
        Some(idx) => &without_scheme[..idx],
        None => without_scheme,
    };
    if authority.is_empty() {
        return None;
    }

    // Bracketed IPv6 literal: "[::1]:8080" -> "::1".
    if let Some(bracketed) = authority.strip_prefix('[') {
        return bracketed.split(']').next().filter(|host| !host.is_empty());
    }

    // Otherwise strip an optional ":port" suffix.
    let host = authority.split(':').next()?.trim();
    if host.is_empty() {
        None
    } else {
        Some(host)
    }
}
608
609fn is_local_relay_url(url: &str) -> bool {
610 relay_host(url).is_some_and(|host| {
611 host.eq_ignore_ascii_case("localhost")
612 || host == "127.0.0.1"
613 || host == "::1"
614 || host.starts_with("127.")
615 })
616}
617
618fn has_non_local_relay(urls: &[String]) -> bool {
619 urls.iter().any(|url| !is_local_relay_url(url))
620}
621
622fn validate_repo_publish_relays(configured: &[String], connected: &[String]) -> Result<()> {
623 if connected.is_empty() {
624 anyhow::bail!(
625 "No relay confirmed repo publication. Another machine will not discover this repo via htree://<npub>/... Check [nostr].relays in ~/.hashtree/config.toml."
626 );
627 }
628
629 if has_non_local_relay(configured) && !has_non_local_relay(connected) {
630 anyhow::bail!(
631 "No public relay confirmed repo publication; local relays only: {}. Another machine will not discover this repo via htree://<npub>/... Check [nostr].relays in ~/.hashtree/config.toml.",
632 connected.join(", ")
633 );
634 }
635
636 Ok(())
637}
638
/// Compute, per PR event id, the kind of the most recent *trusted* status
/// event.
///
/// Trust rules:
/// - an "applied" status (1631) counts only when signed by the repo owner;
/// - any other status counts when signed by the PR author or the repo owner;
/// - if any trusted "applied" exists it wins regardless of later statuses,
///   so a merge cannot be undone by the PR author closing/reopening after.
fn latest_trusted_pr_status_kinds(
    pr_events: &[Event],
    status_events: &[Event],
    repo_owner_pubkey: &str,
) -> HashMap<String, u16> {
    // PR id (hex) -> PR author pubkey (hex), for the trust check below.
    let pr_authors: HashMap<String, String> = pr_events
        .iter()
        .map(|event| (event.id.to_hex(), event.pubkey.to_hex()))
        .collect();

    // Bucket trusted status events by the PR each "e" tag points at.
    let mut trusted_statuses: HashMap<String, Vec<&Event>> = HashMap::new();
    for status in status_events {
        let signer_pubkey = status.pubkey.to_hex();
        for tag in status.tags.iter() {
            let slice = tag.as_slice();
            if slice.len() < 2 || slice[0].as_str() != "e" {
                continue;
            }

            let pr_id = slice[1].to_string();
            // Ignore statuses that reference unknown PRs.
            let Some(pr_author_pubkey) = pr_authors.get(&pr_id) else {
                continue;
            };

            let trusted = if status.kind.as_u16() == KIND_STATUS_APPLIED {
                // Only the repo owner can mark a PR as applied.
                signer_pubkey == repo_owner_pubkey
            } else {
                signer_pubkey == *pr_author_pubkey || signer_pubkey == repo_owner_pubkey
            };
            if trusted {
                trusted_statuses.entry(pr_id).or_default().push(status);
            }
        }
    }

    // Reduce each bucket to one kind: the latest "applied" when present,
    // otherwise the latest trusted status of any kind.
    let mut latest_status = HashMap::new();
    for (pr_id, events) in trusted_statuses {
        if let Some(applied) = pick_latest_event(
            events
                .iter()
                .copied()
                .filter(|event| event.kind.as_u16() == KIND_STATUS_APPLIED),
        ) {
            latest_status.insert(pr_id, applied.kind.as_u16());
        } else if let Some(latest) = pick_latest_event(events.iter().copied()) {
            latest_status.insert(pr_id, latest.kind.as_u16());
        }
    }

    latest_status
}
693
/// Per-relay outcome of publishing to the configured relay set.
#[derive(Debug, Clone)]
pub struct RelayResult {
    #[allow(dead_code)]
    pub configured: Vec<String>,
    // Relays that confirmed the publication.
    pub connected: Vec<String>,
    pub failed: Vec<String>,
}
705
/// Per-server outcome of a Blossom storage operation.
#[derive(Debug, Clone)]
pub struct BlossomResult {
    #[allow(dead_code)]
    pub configured: Vec<String>,
    pub succeeded: Vec<String>,
    pub failed: Vec<String>,
}
717
/// Client for reading (and, with keys, writing) hashtree repo state via
/// Nostr relays, Blossom servers, and an optional local daemon.
pub struct NostrClient {
    // Hex pubkey of the repo owner this client targets.
    pubkey: String,
    // Signing keys; `None` means read-only operation.
    keys: Option<Keys>,
    // Relay URLs to query/publish.
    relays: Vec<String>,
    blossom: BlossomClient,
    // Per-repo caches filled by fetch_refs_with_timeout().
    cached_refs: HashMap<String, HashMap<String, String>>,
    cached_root_hash: HashMap<String, String>,
    cached_encryption_key: HashMap<String, [u8; 32]>,
    // Secret from the URL fragment, used to unmask "encryptedKey" repos.
    url_secret: Option<[u8; 32]>,
    // Whether the caller requested private (author-only) access.
    is_private: bool,
    // Base URL of a local hashtree daemon, if one was detected.
    local_daemon_url: Option<String>,
}
739
/// Root hash plus key material extracted from a repo announcement (relay
/// event or local-daemon response).
#[derive(Debug, Clone, Default)]
struct RootEventData {
    root_hash: String,
    // 32-byte key from a "key" (plaintext) or "encryptedKey" (masked) tag.
    encryption_key: Option<[u8; 32]>,
    // Which tag supplied the key: "key", "encryptedKey", or "selfEncryptedKey".
    key_tag_name: Option<String>,
    // NIP-44 ciphertext carried by a "selfEncryptedKey" tag.
    self_encrypted_ciphertext: Option<String>,
}
747
/// JSON shape of the local daemon's `/api/nostr/resolve/<pubkey>/<repo>` reply.
#[derive(Debug, Deserialize)]
struct DaemonResolveResponse {
    // Root hash of the repo; absent or empty means "not resolved".
    hash: Option<String>,
    // NOTE(review): deserialized from JSON field "key_tag" but consumed as a
    // hex-encoded plaintext key — confirm the daemon's actual field name.
    #[serde(default, rename = "key_tag")]
    key: Option<String>,
    // URL-secret-masked key for link-visible repos (hex).
    #[serde(default, rename = "encryptedKey")]
    encrypted_key: Option<String>,
    // NIP-44 ciphertext of the key for private (author-only) repos.
    #[serde(default, rename = "selfEncryptedKey")]
    self_encrypted_key: Option<String>,
    // Where the daemon resolved the repo from; used only for debug logging.
    #[serde(default)]
    source: Option<String>,
}
760
761impl NostrClient {
    /// Build a client for repos owned by `pubkey`.
    ///
    /// `secret_key` (or the NOSTR_SECRET_KEY env var) enables signing;
    /// without it the client is read-only. `url_secret` unmasks link-visible
    /// repo keys; `is_private` requests author-only decryption.
    pub fn new(
        pubkey: &str,
        secret_key: Option<String>,
        url_secret: Option<[u8; 32]>,
        is_private: bool,
        config: &Config,
    ) -> Result<Self> {
        // Install the process-wide TLS provider; ignoring the error makes
        // repeat installation harmless.
        let _ = rustls::crypto::ring::default_provider().install_default();

        // Caller-provided secret wins; fall back to the environment.
        let secret_key = secret_key.or_else(|| std::env::var("NOSTR_SECRET_KEY").ok());

        let keys = if let Some(ref secret_hex) = secret_key {
            let secret_bytes = hex::decode(secret_hex).context("Invalid secret key hex")?;
            let secret = nostr::SecretKey::from_slice(&secret_bytes)
                .map_err(|e| anyhow::anyhow!("Invalid secret key: {}", e))?;
            Some(Keys::new(secret))
        } else {
            None
        };

        // Blossom needs *some* signing key; use a throwaway when read-only.
        let blossom_keys = keys.clone().unwrap_or_else(Keys::generate);
        let blossom = BlossomClient::new(blossom_keys).with_timeout(Duration::from_secs(30));

        tracing::info!(
            "BlossomClient created with read_servers: {:?}, write_servers: {:?}",
            blossom.read_servers(),
            blossom.write_servers()
        );

        let relays = hashtree_config::resolve_relays(
            &config.nostr.relays,
            Some(config.server.bind_address.as_str()),
        );
        // Prefer an auto-detected local daemon; otherwise reuse a loopback
        // Blossom read server as the daemon base URL.
        let local_daemon_url =
            hashtree_config::detect_local_daemon_url(Some(config.server.bind_address.as_str()))
                .or_else(|| {
                    config
                        .blossom
                        .read_servers
                        .iter()
                        .find(|url| {
                            url.starts_with("http://127.0.0.1:")
                                || url.starts_with("http://localhost:")
                        })
                        .cloned()
                });

        Ok(Self {
            pubkey: pubkey.to_string(),
            keys,
            relays,
            blossom,
            cached_refs: HashMap::new(),
            cached_root_hash: HashMap::new(),
            cached_encryption_key: HashMap::new(),
            url_secret,
            is_private,
            local_daemon_url,
        })
    }
828
829 fn format_repo_author(pubkey_hex: &str) -> String {
830 PublicKey::from_hex(pubkey_hex)
831 .ok()
832 .and_then(|pk| pk.to_bech32().ok())
833 .unwrap_or_else(|| pubkey_hex.to_string())
834 }
835
    /// True when this client holds a secret key and can sign/publish events.
    #[allow(dead_code)]
    pub fn can_sign(&self) -> bool {
        self.keys.is_some()
    }
841
842 pub fn list_repos(&self) -> Result<Vec<String>> {
843 let rt = tokio::runtime::Builder::new_multi_thread()
844 .enable_all()
845 .build()
846 .context("Failed to create tokio runtime")?;
847
848 let result = rt.block_on(self.list_repos_async());
849 rt.shutdown_timeout(Duration::from_millis(500));
850 result
851 }
852
    /// List repo names announced by `self.pubkey`, one entry per repo
    /// (latest announcement wins), sorted by name.
    pub async fn list_repos_async(&self) -> Result<Vec<String>> {
        let client = Client::default();

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
            }
        }
        client.connect().await;

        // Poll until at least one relay is connected (2s budget).
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut connected = false;
            for relay in relays.values() {
                if relay.is_connected().await {
                    connected = true;
                    break;
                }
            }
            if connected {
                break;
            }
            if start.elapsed() > Duration::from_secs(2) {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to connect to any relay while listing repos"
                ));
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        let author = PublicKey::from_hex(&self.pubkey)
            .map_err(|e| anyhow::anyhow!("Invalid pubkey: {}", e))?;
        let filter = build_git_repo_list_filter(author);

        // Hard 3s cap on the relay query itself.
        let events = match tokio::time::timeout(
            Duration::from_secs(3),
            client.get_events_of(vec![filter], EventSource::relays(None)),
        )
        .await
        {
            Ok(Ok(events)) => events,
            Ok(Err(e)) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to fetch git repo events from relays: {}",
                    e
                ));
            }
            Err(_) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Timed out fetching git repo events from relays"
                ));
            }
        };

        let _ = client.disconnect().await;

        Ok(list_git_repo_announcements(&events)
            .into_iter()
            .map(|repo| repo.repo_name)
            .collect())
    }
918
919 pub fn fetch_refs(&mut self, repo_name: &str) -> Result<HashMap<String, String>> {
922 let (refs, _, _) = self.fetch_refs_with_timeout(repo_name, 10)?;
923 Ok(refs)
924 }
925
926 #[allow(dead_code)]
929 pub fn fetch_refs_quick(&mut self, repo_name: &str) -> Result<HashMap<String, String>> {
930 let (refs, _, _) = self.fetch_refs_with_timeout(repo_name, 3)?;
931 Ok(refs)
932 }
933
    /// Like `fetch_refs`, but also returns the root hash and encryption key.
    #[allow(dead_code)]
    pub fn fetch_refs_with_root(&mut self, repo_name: &str) -> Result<FetchedRefs> {
        self.fetch_refs_with_timeout(repo_name, 10)
    }
940
941 fn fetch_refs_with_timeout(
943 &mut self,
944 repo_name: &str,
945 timeout_secs: u64,
946 ) -> Result<FetchedRefs> {
947 debug!(
948 "Fetching refs for {} from {} (timeout {}s)",
949 repo_name, self.pubkey, timeout_secs
950 );
951
952 if let Some(refs) = self.cached_refs.get(repo_name) {
954 let root = self.cached_root_hash.get(repo_name).cloned();
955 let key = self.cached_encryption_key.get(repo_name).cloned();
956 return Ok((refs.clone(), root, key));
957 }
958
959 let rt = tokio::runtime::Builder::new_multi_thread()
962 .enable_all()
963 .build()
964 .context("Failed to create tokio runtime")?;
965
966 let (refs, root_hash, encryption_key) =
967 rt.block_on(self.fetch_refs_async_with_timeout(repo_name, timeout_secs))?;
968 self.cached_refs.insert(repo_name.to_string(), refs.clone());
969 if let Some(ref root) = root_hash {
970 self.cached_root_hash
971 .insert(repo_name.to_string(), root.clone());
972 }
973 if let Some(key) = encryption_key {
974 self.cached_encryption_key
975 .insert(repo_name.to_string(), key);
976 }
977 Ok((refs, root_hash, encryption_key))
978 }
979
980 fn parse_root_event_data_from_event(event: &Event) -> RootEventData {
981 let root_hash = event
982 .tags
983 .iter()
984 .find(|t| t.as_slice().len() >= 2 && t.as_slice()[0].as_str() == "hash")
985 .map(|t| t.as_slice()[1].to_string())
986 .unwrap_or_else(|| event.content.to_string());
987
988 let (encryption_key, key_tag_name, self_encrypted_ciphertext) = event
989 .tags
990 .iter()
991 .find_map(|t| {
992 let slice = t.as_slice();
993 if slice.len() < 2 {
994 return None;
995 }
996 let tag_name = slice[0].as_str();
997 let tag_value = slice[1].to_string();
998 if tag_name == "selfEncryptedKey" {
999 return Some((None, Some(tag_name.to_string()), Some(tag_value)));
1000 }
1001 if tag_name == "key" || tag_name == "encryptedKey" {
1002 if let Ok(bytes) = hex::decode(&tag_value) {
1003 if bytes.len() == 32 {
1004 let mut key = [0u8; 32];
1005 key.copy_from_slice(&bytes);
1006 return Some((Some(key), Some(tag_name.to_string()), None));
1007 }
1008 }
1009 }
1010 None
1011 })
1012 .unwrap_or((None, None, None));
1013
1014 RootEventData {
1015 root_hash,
1016 encryption_key,
1017 key_tag_name,
1018 self_encrypted_ciphertext,
1019 }
1020 }
1021
1022 fn parse_daemon_response_to_root_data(
1023 response: DaemonResolveResponse,
1024 ) -> Option<RootEventData> {
1025 let root_hash = response.hash?;
1026 if root_hash.is_empty() {
1027 return None;
1028 }
1029
1030 let mut data = RootEventData {
1031 root_hash,
1032 encryption_key: None,
1033 key_tag_name: None,
1034 self_encrypted_ciphertext: None,
1035 };
1036
1037 if let Some(ciphertext) = response.self_encrypted_key {
1038 data.key_tag_name = Some("selfEncryptedKey".to_string());
1039 data.self_encrypted_ciphertext = Some(ciphertext);
1040 return Some(data);
1041 }
1042
1043 let (tag_name, tag_value) = if let Some(v) = response.encrypted_key {
1044 ("encryptedKey", v)
1045 } else if let Some(v) = response.key {
1046 ("key", v)
1047 } else {
1048 return Some(data);
1049 };
1050
1051 if let Ok(bytes) = hex::decode(&tag_value) {
1052 if bytes.len() == 32 {
1053 let mut key = [0u8; 32];
1054 key.copy_from_slice(&bytes);
1055 data.encryption_key = Some(key);
1056 data.key_tag_name = Some(tag_name.to_string());
1057 }
1058 }
1059
1060 Some(data)
1061 }
1062
1063 async fn fetch_root_from_local_daemon(
1064 &self,
1065 repo_name: &str,
1066 timeout: Duration,
1067 ) -> Option<RootEventData> {
1068 let base = self.local_daemon_url.as_ref()?;
1069 let url = format!(
1070 "{}/api/nostr/resolve/{}/{}",
1071 base.trim_end_matches('/'),
1072 self.pubkey,
1073 repo_name
1074 );
1075
1076 let client = reqwest::Client::builder().timeout(timeout).build().ok()?;
1077 let response = client.get(&url).send().await.ok()?;
1078 if !response.status().is_success() {
1079 return None;
1080 }
1081
1082 let payload: DaemonResolveResponse = response.json().await.ok()?;
1083 let source = payload
1084 .source
1085 .clone()
1086 .unwrap_or_else(|| "unknown".to_string());
1087 let parsed = Self::parse_daemon_response_to_root_data(payload)?;
1088 debug!(
1089 "Resolved repo {} via local daemon source={}",
1090 repo_name, source
1091 );
1092 Some(parsed)
1093 }
1094
1095 async fn fetch_refs_async_with_timeout(
1096 &self,
1097 repo_name: &str,
1098 timeout_secs: u64,
1099 ) -> Result<(HashMap<String, String>, Option<String>, Option<[u8; 32]>)> {
1100 let client = Client::default();
1102
1103 for relay in &self.relays {
1105 if let Err(e) = client.add_relay(relay).await {
1106 warn!("Failed to add relay {}: {}", relay, e);
1107 }
1108 }
1109
1110 client.connect().await;
1112
1113 let connect_timeout = Duration::from_secs(2);
1114 let query_timeout = Duration::from_secs(timeout_secs.saturating_sub(2).max(3));
1115 let local_daemon_timeout = Duration::from_secs(4);
1116 let retry_delay = Duration::from_millis(300);
1117 let max_attempts = 2;
1118
1119 let start = std::time::Instant::now();
1120
1121 let author = PublicKey::from_hex(&self.pubkey)
1123 .map_err(|e| anyhow::anyhow!("Invalid pubkey: {}", e))?;
1124
1125 let filter = build_repo_event_filter(author, repo_name);
1126
1127 debug!("Querying relays for repo {} events", repo_name);
1128
1129 let mut root_data = None;
1130 for attempt in 1..=max_attempts {
1131 let connect_start = std::time::Instant::now();
1134 let mut last_log = std::time::Instant::now();
1135 let mut has_connected_relay = false;
1136 loop {
1137 let relays = client.relays().await;
1138 let total = relays.len();
1139 let mut connected = 0;
1140 for relay in relays.values() {
1141 if relay.is_connected().await {
1142 connected += 1;
1143 }
1144 }
1145 if connected > 0 {
1146 debug!(
1147 "Connected to {}/{} relay(s) in {:?} (attempt {}/{})",
1148 connected,
1149 total,
1150 start.elapsed(),
1151 attempt,
1152 max_attempts
1153 );
1154 has_connected_relay = true;
1155 break;
1156 }
1157 if last_log.elapsed() > Duration::from_millis(500) {
1158 debug!(
1159 "Connecting to relays... (0/{} after {:?}, attempt {}/{})",
1160 total,
1161 start.elapsed(),
1162 attempt,
1163 max_attempts
1164 );
1165 last_log = std::time::Instant::now();
1166 }
1167 if connect_start.elapsed() > connect_timeout {
1168 debug!(
1169 "Timeout waiting for relay connections - continuing with local-daemon fallback"
1170 );
1171 break;
1172 }
1173 tokio::time::sleep(Duration::from_millis(50)).await;
1174 }
1175
1176 let events = if has_connected_relay {
1180 match client
1181 .get_events_of(
1182 vec![filter.clone()],
1183 EventSource::relays(Some(query_timeout)),
1184 )
1185 .await
1186 {
1187 Ok(events) => events,
1188 Err(e) => {
1189 warn!("Failed to fetch events: {}", e);
1190 vec![]
1191 }
1192 }
1193 } else {
1194 vec![]
1195 };
1196
1197 debug!(
1198 "Got {} events from relays on attempt {}/{}",
1199 events.len(),
1200 attempt,
1201 max_attempts
1202 );
1203 let relay_event = pick_latest_repo_event(events.iter(), repo_name);
1204
1205 if let Some(event) = relay_event {
1206 debug!(
1207 "Found relay event with root hash: {}",
1208 &event.content[..12.min(event.content.len())]
1209 );
1210 root_data = Some(Self::parse_root_event_data_from_event(event));
1211 break;
1212 }
1213
1214 if let Some(data) = self
1215 .fetch_root_from_local_daemon(repo_name, local_daemon_timeout)
1216 .await
1217 {
1218 root_data = Some(data);
1219 break;
1220 }
1221
1222 if attempt < max_attempts {
1223 debug!(
1224 "No hashtree event found for {} on attempt {}/{}; retrying",
1225 repo_name, attempt, max_attempts
1226 );
1227 tokio::time::sleep(retry_delay).await;
1228 }
1229 }
1230
1231 let _ = client.disconnect().await;
1233
1234 let root_data = match root_data {
1235 Some(data) => data,
1236 None => {
1237 anyhow::bail!(
1238 "Repository '{}' not found (no hashtree event published by {})",
1239 repo_name,
1240 Self::format_repo_author(&self.pubkey)
1241 );
1242 }
1243 };
1244
1245 let root_hash = root_data.root_hash;
1246
1247 if root_hash.is_empty() {
1248 debug!("Empty root hash in event");
1249 return Ok((HashMap::new(), None, None));
1250 }
1251
1252 let encryption_key = root_data.encryption_key;
1253 let key_tag_name = root_data.key_tag_name;
1254 let self_encrypted_ciphertext = root_data.self_encrypted_ciphertext;
1255
1256 let unmasked_key = match key_tag_name.as_deref() {
1258 Some("encryptedKey") => {
1259 if let (Some(masked), Some(secret)) = (encryption_key, self.url_secret) {
1261 let mut unmasked = [0u8; 32];
1262 for i in 0..32 {
1263 unmasked[i] = masked[i] ^ secret[i];
1264 }
1265 Some(unmasked)
1266 } else {
1267 anyhow::bail!(
1268 "This repo is link-visible and requires a secret key.\n\
1269 Use: htree://.../{repo_name}#k=<secret>\n\
1270 Ask the repo owner for the full URL with the secret."
1271 );
1272 }
1273 }
1274 Some("selfEncryptedKey") => {
1275 if !self.is_private {
1277 anyhow::bail!(
1278 "This repo is private (author-only).\n\
1279 Use: htree://.../{repo_name}#private\n\
1280 Only the author can access this repo."
1281 );
1282 }
1283
1284 if let Some(keys) = &self.keys {
1286 if let Some(ciphertext) = self_encrypted_ciphertext {
1287 let pubkey = keys.public_key();
1289 match nip44::decrypt(keys.secret_key(), &pubkey, &ciphertext) {
1290 Ok(key_hex) => {
1291 let key_bytes =
1292 hex::decode(&key_hex).context("Invalid decrypted key hex")?;
1293 if key_bytes.len() != 32 {
1294 anyhow::bail!("Decrypted key wrong length");
1295 }
1296 let mut key = [0u8; 32];
1297 key.copy_from_slice(&key_bytes);
1298 Some(key)
1299 }
1300 Err(e) => {
1301 anyhow::bail!(
1302 "Failed to decrypt private repo: {}\n\
1303 The repo may be corrupted or published with a different key.",
1304 e
1305 );
1306 }
1307 }
1308 } else {
1309 anyhow::bail!("selfEncryptedKey tag has invalid format");
1310 }
1311 } else {
1312 anyhow::bail!(
1313 "Cannot access this private repo.\n\
1314 Private repos can only be accessed by their author.\n\
1315 You don't have the secret key for this repo's owner."
1316 );
1317 }
1318 }
1319 Some("key") | None => {
1320 encryption_key
1322 }
1323 Some(other) => {
1324 warn!("Unknown key tag type: {}", other);
1325 encryption_key
1326 }
1327 };
1328
1329 info!(
1330 "Found root hash {} for {} (encrypted: {}, link_visible: {})",
1331 &root_hash[..12.min(root_hash.len())],
1332 repo_name,
1333 unmasked_key.is_some(),
1334 self.url_secret.is_some()
1335 );
1336
1337 let refs = self
1339 .fetch_refs_from_hashtree(&root_hash, unmasked_key.as_ref())
1340 .await?;
1341 Ok((refs, Some(root_hash), unmasked_key))
1342 }
1343
1344 fn decrypt_and_decode(
1346 &self,
1347 data: &[u8],
1348 key: Option<&[u8; 32]>,
1349 ) -> Option<hashtree_core::TreeNode> {
1350 let decrypted_data: Vec<u8>;
1351 let data_to_decode = if let Some(k) = key {
1352 match decrypt_chk(data, k) {
1353 Ok(d) => {
1354 decrypted_data = d;
1355 &decrypted_data
1356 }
1357 Err(e) => {
1358 debug!("Decryption failed: {}", e);
1359 return None;
1360 }
1361 }
1362 } else {
1363 data
1364 };
1365
1366 match decode_tree_node(data_to_decode) {
1367 Ok(node) => Some(node),
1368 Err(e) => {
1369 debug!("Failed to decode tree node: {}", e);
1370 None
1371 }
1372 }
1373 }
1374
1375 async fn fetch_refs_from_hashtree(
1378 &self,
1379 root_hash: &str,
1380 encryption_key: Option<&[u8; 32]>,
1381 ) -> Result<HashMap<String, String>> {
1382 let mut refs = HashMap::new();
1383 debug!(
1384 "fetch_refs_from_hashtree: downloading root {}",
1385 &root_hash[..12]
1386 );
1387
1388 let root_data = match self.blossom.download(root_hash).await {
1390 Ok(data) => {
1391 debug!("Downloaded {} bytes from blossom", data.len());
1392 data
1393 }
1394 Err(e) => {
1395 anyhow::bail!(
1396 "Failed to download root hash {}: {}",
1397 &root_hash[..12.min(root_hash.len())],
1398 e
1399 );
1400 }
1401 };
1402
1403 let root_node = match self.decrypt_and_decode(&root_data, encryption_key) {
1405 Some(node) => {
1406 debug!("Decoded root node with {} links", node.links.len());
1407 node
1408 }
1409 None => {
1410 debug!(
1411 "Failed to decode root node (encryption_key: {})",
1412 encryption_key.is_some()
1413 );
1414 return Ok(refs);
1415 }
1416 };
1417
1418 debug!(
1420 "Root links: {:?}",
1421 root_node
1422 .links
1423 .iter()
1424 .map(|l| l.name.as_deref())
1425 .collect::<Vec<_>>()
1426 );
1427 let git_link = root_node
1428 .links
1429 .iter()
1430 .find(|l| l.name.as_deref() == Some(".git"));
1431 let (git_hash, git_key) = match git_link {
1432 Some(link) => {
1433 debug!("Found .git link with key: {}", link.key.is_some());
1434 (hex::encode(link.hash), link.key)
1435 }
1436 None => {
1437 debug!("No .git directory in hashtree root");
1438 return Ok(refs);
1439 }
1440 };
1441
1442 let git_data = match self.blossom.download(&git_hash).await {
1444 Ok(data) => data,
1445 Err(e) => {
1446 anyhow::bail!(
1447 "Failed to download .git directory ({}): {}",
1448 &git_hash[..12],
1449 e
1450 );
1451 }
1452 };
1453
1454 let git_node = match self.decrypt_and_decode(&git_data, git_key.as_ref()) {
1455 Some(node) => {
1456 debug!(
1457 "Decoded .git node with {} links: {:?}",
1458 node.links.len(),
1459 node.links
1460 .iter()
1461 .map(|l| l.name.as_deref())
1462 .collect::<Vec<_>>()
1463 );
1464 node
1465 }
1466 None => {
1467 debug!("Failed to decode .git node (key: {})", git_key.is_some());
1468 return Ok(refs);
1469 }
1470 };
1471
1472 let refs_link = git_node
1474 .links
1475 .iter()
1476 .find(|l| l.name.as_deref() == Some("refs"));
1477 let (refs_hash, refs_key) = match refs_link {
1478 Some(link) => (hex::encode(link.hash), link.key),
1479 None => {
1480 debug!("No refs directory in .git");
1481 return Ok(refs);
1482 }
1483 };
1484
1485 let refs_data = match self.blossom.try_download(&refs_hash).await {
1487 Some(data) => data,
1488 None => {
1489 debug!("Could not download refs directory");
1490 return Ok(refs);
1491 }
1492 };
1493
1494 let refs_node = match self.decrypt_and_decode(&refs_data, refs_key.as_ref()) {
1495 Some(node) => node,
1496 None => {
1497 return Ok(refs);
1498 }
1499 };
1500
1501 if let Some(head_link) = git_node
1503 .links
1504 .iter()
1505 .find(|l| l.name.as_deref() == Some("HEAD"))
1506 {
1507 let head_hash = hex::encode(head_link.hash);
1508 if let Some(head_data) = self.blossom.try_download(&head_hash).await {
1509 let head_content = if let Some(k) = head_link.key.as_ref() {
1511 match decrypt_chk(&head_data, k) {
1512 Ok(d) => String::from_utf8_lossy(&d).trim().to_string(),
1513 Err(_) => String::from_utf8_lossy(&head_data).trim().to_string(),
1514 }
1515 } else {
1516 String::from_utf8_lossy(&head_data).trim().to_string()
1517 };
1518 refs.insert("HEAD".to_string(), head_content);
1519 }
1520 }
1521
1522 for subdir_link in &refs_node.links {
1524 if subdir_link.link_type != LinkType::Dir {
1525 continue;
1526 }
1527 let subdir_name = match &subdir_link.name {
1528 Some(n) => n.clone(),
1529 None => continue,
1530 };
1531 let subdir_hash = hex::encode(subdir_link.hash);
1532
1533 self.collect_refs_recursive(
1534 &subdir_hash,
1535 subdir_link.key.as_ref(),
1536 &format!("refs/{}", subdir_name),
1537 &mut refs,
1538 )
1539 .await;
1540 }
1541
1542 debug!("Found {} refs from hashtree", refs.len());
1543 Ok(refs)
1544 }
1545
1546 async fn collect_refs_recursive(
1548 &self,
1549 dir_hash: &str,
1550 dir_key: Option<&[u8; 32]>,
1551 prefix: &str,
1552 refs: &mut HashMap<String, String>,
1553 ) {
1554 let dir_data = match self.blossom.try_download(dir_hash).await {
1555 Some(data) => data,
1556 None => return,
1557 };
1558
1559 let dir_node = match self.decrypt_and_decode(&dir_data, dir_key) {
1560 Some(node) => node,
1561 None => return,
1562 };
1563
1564 for link in &dir_node.links {
1565 let name = match &link.name {
1566 Some(n) => n.clone(),
1567 None => continue,
1568 };
1569 let link_hash = hex::encode(link.hash);
1570 let ref_path = format!("{}/{}", prefix, name);
1571
1572 if link.link_type == LinkType::Dir {
1573 Box::pin(self.collect_refs_recursive(
1575 &link_hash,
1576 link.key.as_ref(),
1577 &ref_path,
1578 refs,
1579 ))
1580 .await;
1581 } else {
1582 if let Some(ref_data) = self.blossom.try_download(&link_hash).await {
1584 let sha = if let Some(k) = link.key.as_ref() {
1586 match decrypt_chk(&ref_data, k) {
1587 Ok(d) => String::from_utf8_lossy(&d).trim().to_string(),
1588 Err(_) => String::from_utf8_lossy(&ref_data).trim().to_string(),
1589 }
1590 } else {
1591 String::from_utf8_lossy(&ref_data).trim().to_string()
1592 };
1593 if !sha.is_empty() {
1594 debug!("Found ref {} -> {}", ref_path, sha);
1595 refs.insert(ref_path, sha);
1596 }
1597 }
1598 }
1599 }
1600 }
1601
1602 #[allow(dead_code)]
1604 pub fn update_ref(&mut self, repo_name: &str, ref_name: &str, sha: &str) -> Result<()> {
1605 info!("Updating ref {} -> {} for {}", ref_name, sha, repo_name);
1606
1607 let refs = self.cached_refs.entry(repo_name.to_string()).or_default();
1608 refs.insert(ref_name.to_string(), sha.to_string());
1609
1610 Ok(())
1611 }
1612
1613 pub fn delete_ref(&mut self, repo_name: &str, ref_name: &str) -> Result<()> {
1615 info!("Deleting ref {} for {}", ref_name, repo_name);
1616
1617 if let Some(refs) = self.cached_refs.get_mut(repo_name) {
1618 refs.remove(ref_name);
1619 }
1620
1621 Ok(())
1622 }
1623
    /// Root hash cached by an earlier fetch for `repo_name`, if any.
    pub fn get_cached_root_hash(&self, repo_name: &str) -> Option<&String> {
        self.cached_root_hash.get(repo_name)
    }
1628
    /// Encryption key cached by an earlier fetch for `repo_name`, if any.
    pub fn get_cached_encryption_key(&self, repo_name: &str) -> Option<&[u8; 32]> {
        self.cached_encryption_key.get(repo_name)
    }
1633
    /// Borrow the underlying blossom client.
    pub fn blossom(&self) -> &BlossomClient {
        &self.blossom
    }
1638
1639 pub fn relay_urls(&self) -> Vec<String> {
1641 self.relays.clone()
1642 }
1643
    #[allow(dead_code)]
    /// Hex-encoded public key this client reads and publishes as.
    pub fn pubkey(&self) -> &str {
        &self.pubkey
    }
1649
1650 pub fn npub(&self) -> String {
1652 PublicKey::from_hex(&self.pubkey)
1653 .ok()
1654 .and_then(|pk| pk.to_bech32().ok())
1655 .unwrap_or_else(|| self.pubkey.clone())
1656 }
1657
1658 pub fn publish_repo(
1666 &self,
1667 repo_name: &str,
1668 root_hash: &str,
1669 encryption_key: Option<(&[u8; 32], bool, bool)>,
1670 ) -> Result<(String, RelayResult)> {
1671 let keys = self.keys.as_ref().context(format!(
1672 "Cannot push: no secret key for {}. You can only push to your own repos.",
1673 &self.pubkey[..16]
1674 ))?;
1675
1676 info!(
1677 "Publishing repo {} with root hash {} (encrypted: {})",
1678 repo_name,
1679 root_hash,
1680 encryption_key.is_some()
1681 );
1682
1683 let rt = tokio::runtime::Builder::new_multi_thread()
1685 .enable_all()
1686 .build()
1687 .context("Failed to create tokio runtime")?;
1688
1689 let result =
1690 rt.block_on(self.publish_repo_async(keys, repo_name, root_hash, encryption_key));
1691
1692 rt.shutdown_timeout(std::time::Duration::from_millis(500));
1695
1696 result
1697 }
1698
    /// Async worker for `publish_repo`: connects to the configured relays,
    /// builds a replaceable repo-announcement event (kind 30078) and sends
    /// it, then reports which relays confirmed it.
    ///
    /// Returns the `htree://` URL and a `RelayResult` with the configured /
    /// connected / failed relay lists. Fails if signing or NIP-44 encryption
    /// fails, or if `validate_repo_publish_relays` rejects the outcome.
    async fn publish_repo_async(
        &self,
        keys: &Keys,
        repo_name: &str,
        root_hash: &str,
        encryption_key: Option<(&[u8; 32], bool, bool)>,
    ) -> Result<(String, RelayResult)> {
        let client = Client::new(keys.clone());

        // Relay bookkeeping: `connected` is filled from send confirmations
        // below; `failed` accumulates add/send failures.
        let configured: Vec<String> = self.relays.clone();
        let mut connected: Vec<String> = Vec::new();
        let mut failed: Vec<String> = Vec::new();

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
                failed.push(relay.clone());
            }
        }

        client.connect().await;

        // Poll until at least one relay reports connected, or give up after
        // the timeout (sending may still succeed later; this just bounds the
        // wait before we proceed).
        let connect_timeout = Duration::from_secs(3);
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut any_connected = false;
            for (_url, relay) in relays.iter() {
                if relay.is_connected().await {
                    any_connected = true;
                    break;
                }
            }
            if any_connected {
                break;
            }
            if start.elapsed() > connect_timeout {
                break;
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        // Replaceable events need a strictly newer created_at than the
        // previous announcement, or relays will keep the old one.
        let publish_created_at = next_replaceable_created_at(
            Timestamp::now(),
            latest_repo_event_created_at(
                &client,
                keys.public_key(),
                repo_name,
                Duration::from_secs(2),
            )
            .await,
        );

        let mut tags = vec![
            Tag::custom(TagKind::custom("d"), vec![repo_name.to_string()]),
            Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            Tag::custom(TagKind::custom("hash"), vec![root_hash.to_string()]),
        ];

        // Key-tag variants: selfEncryptedKey (author-only, NIP-44 encrypted
        // to self), encryptedKey (link-visible, masked key published), or
        // key (plain shared key). Precedence: self-private wins.
        if let Some((key, is_link_visible, is_self_private)) = encryption_key {
            if is_self_private {
                let pubkey = keys.public_key();
                let key_hex = hex::encode(key);
                let encrypted =
                    nip44::encrypt(keys.secret_key(), &pubkey, &key_hex, nip44::Version::V2)
                        .map_err(|e| anyhow::anyhow!("NIP-44 encryption failed: {}", e))?;
                tags.push(Tag::custom(
                    TagKind::custom("selfEncryptedKey"),
                    vec![encrypted],
                ));
            } else if is_link_visible {
                tags.push(Tag::custom(
                    TagKind::custom("encryptedKey"),
                    vec![hex::encode(key)],
                ));
            } else {
                tags.push(Tag::custom(TagKind::custom("key"), vec![hex::encode(key)]));
            }
        }

        append_repo_discovery_labels(&mut tags, repo_name);

        let event = EventBuilder::new(Kind::Custom(KIND_APP_DATA), root_hash, tags)
            .custom_created_at(publish_created_at)
            .to_event(keys)
            .map_err(|e| anyhow::anyhow!("Failed to sign event: {}", e))?;

        match client.send_event(event.clone()).await {
            Ok(output) => {
                // Per-relay confirmations drive the connected/failed lists;
                // a relay cannot appear in both.
                for url in output.success.iter() {
                    let url_str = url.to_string();
                    if !connected.contains(&url_str) {
                        connected.push(url_str);
                    }
                }
                for (url, err) in output.failed.iter() {
                    if err.is_some() {
                        let url_str = url.to_string();
                        if !failed.contains(&url_str) && !connected.contains(&url_str) {
                            failed.push(url_str);
                        }
                    }
                }
                info!(
                    "Sent event {} to {} relays ({} failed)",
                    output.id(),
                    output.success.len(),
                    output.failed.len()
                );
            }
            Err(e) => {
                // Send failed entirely: mark every configured relay failed.
                warn!("Failed to send event: {}", e);
                for relay in &self.relays {
                    if !failed.contains(relay) {
                        failed.push(relay.clone());
                    }
                }
            }
        };

        let npub_url = keys
            .public_key()
            .to_bech32()
            .map(|npub| format!("htree://{}/{}", npub, repo_name))
            .unwrap_or_else(|_| format!("htree://{}/{}", &self.pubkey[..16], repo_name));

        // Validate before disconnecting, but only propagate the error after
        // cleanup so the client is always torn down.
        let relay_validation = validate_repo_publish_relays(&configured, &connected);

        let _ = client.disconnect().await;
        tokio::time::sleep(Duration::from_millis(50)).await;

        relay_validation?;

        Ok((
            npub_url,
            RelayResult {
                configured,
                connected,
                failed,
            },
        ))
    }
1861
1862 pub fn fetch_prs(
1864 &self,
1865 repo_name: &str,
1866 state_filter: PullRequestStateFilter,
1867 ) -> Result<Vec<PullRequestListItem>> {
1868 let rt = tokio::runtime::Builder::new_multi_thread()
1869 .enable_all()
1870 .build()
1871 .context("Failed to create tokio runtime")?;
1872
1873 let result = rt.block_on(self.fetch_prs_async(repo_name, state_filter));
1874 rt.shutdown_timeout(Duration::from_millis(500));
1875 result
1876 }
1877
    /// Fetch the pull requests (kind 1618) targeting `repo_name`, resolve
    /// each one's latest trusted status event, and return the list filtered
    /// by `state_filter`, newest first.
    ///
    /// # Errors
    /// Fails when no relay connects within 2s, or when either relay query
    /// errors or times out (3s each). Every error path disconnects first.
    pub async fn fetch_prs_async(
        &self,
        repo_name: &str,
        state_filter: PullRequestStateFilter,
    ) -> Result<Vec<PullRequestListItem>> {
        // Read-only query: no signing keys needed, so a default client.
        let client = Client::default();

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
            }
        }
        client.connect().await;

        // Wait (up to 2s) for at least one relay before querying.
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut connected = false;
            for relay in relays.values() {
                if relay.is_connected().await {
                    connected = true;
                    break;
                }
            }
            if connected {
                break;
            }
            if start.elapsed() > Duration::from_secs(2) {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to connect to any relay while fetching PRs"
                ));
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        // PRs reference the repo via an "a" tag of form kind:pubkey:name.
        let repo_address = format!("{}:{}:{}", KIND_REPO_ANNOUNCEMENT, self.pubkey, repo_name);
        let pr_filter = Filter::new()
            .kind(Kind::Custom(KIND_PULL_REQUEST))
            .custom_tag(SingleLetterTag::lowercase(Alphabet::A), vec![&repo_address]);

        let pr_events = match tokio::time::timeout(
            Duration::from_secs(3),
            client.get_events_of(vec![pr_filter], EventSource::relays(None)),
        )
        .await
        {
            Ok(Ok(events)) => events,
            Ok(Err(e)) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to fetch PR events from relays: {}",
                    e
                ));
            }
            Err(_) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!("Timed out fetching PR events from relays"));
            }
        };

        if pr_events.is_empty() {
            let _ = client.disconnect().await;
            return Ok(Vec::new());
        }

        let pr_ids: Vec<String> = pr_events.iter().map(|e| e.id.to_hex()).collect();

        // Second query: status events (open/applied/closed/draft) that
        // reference any of the PRs via an "e" tag.
        let status_filter = Filter::new()
            .kinds(vec![
                Kind::Custom(KIND_STATUS_OPEN),
                Kind::Custom(KIND_STATUS_APPLIED),
                Kind::Custom(KIND_STATUS_CLOSED),
                Kind::Custom(KIND_STATUS_DRAFT),
            ])
            .custom_tag(
                SingleLetterTag::lowercase(Alphabet::E),
                pr_ids.iter().map(|s| s.as_str()).collect::<Vec<_>>(),
            );

        let status_events = match tokio::time::timeout(
            Duration::from_secs(3),
            client.get_events_of(vec![status_filter], EventSource::relays(None)),
        )
        .await
        {
            Ok(Ok(events)) => events,
            Ok(Err(e)) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to fetch PR status events from relays: {}",
                    e
                ));
            }
            Err(_) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Timed out fetching PR status events from relays"
                ));
            }
        };

        let _ = client.disconnect().await;

        // Map pr_id -> latest status kind from trusted authors only.
        let latest_status =
            latest_trusted_pr_status_kinds(&pr_events, &status_events, &self.pubkey);

        let mut prs = Vec::new();
        for event in &pr_events {
            let pr_id = event.id.to_hex();
            // No status event means the PR defaults to Open.
            let state =
                PullRequestState::from_latest_status_kind(latest_status.get(&pr_id).copied());
            if !state_filter.includes(state) {
                continue;
            }

            let mut subject = None;
            let mut commit_tip = None;
            let mut branch = None;
            let mut target_branch = None;

            // Pull display metadata out of the PR event's tags; later tags
            // of the same name overwrite earlier ones.
            for tag in event.tags.iter() {
                let slice = tag.as_slice();
                if slice.len() >= 2 {
                    match slice[0].as_str() {
                        "subject" => subject = Some(slice[1].to_string()),
                        "c" => commit_tip = Some(slice[1].to_string()),
                        "branch" => branch = Some(slice[1].to_string()),
                        "target-branch" => target_branch = Some(slice[1].to_string()),
                        _ => {}
                    }
                }
            }

            prs.push(PullRequestListItem {
                event_id: pr_id,
                author_pubkey: event.pubkey.to_hex(),
                state,
                subject,
                commit_tip,
                branch,
                target_branch,
                created_at: event.created_at.as_u64(),
            });
        }

        // Newest first; event id breaks same-second ties deterministically.
        prs.sort_by(|left, right| {
            right
                .created_at
                .cmp(&left.created_at)
                .then_with(|| right.event_id.cmp(&left.event_id))
        });

        debug!(
            "Found {} PRs for {} (filter: {:?})",
            prs.len(),
            repo_name,
            state_filter
        );
        Ok(prs)
    }
2045
2046 pub fn publish_pr_merged_status(
2048 &self,
2049 pr_event_id: &str,
2050 pr_author_pubkey: &str,
2051 ) -> Result<()> {
2052 let keys = self
2053 .keys
2054 .as_ref()
2055 .context("Cannot publish status: no secret key")?;
2056
2057 let rt = tokio::runtime::Builder::new_multi_thread()
2058 .enable_all()
2059 .build()
2060 .context("Failed to create tokio runtime")?;
2061
2062 let result =
2063 rt.block_on(self.publish_pr_merged_status_async(keys, pr_event_id, pr_author_pubkey));
2064 rt.shutdown_timeout(Duration::from_millis(500));
2065 result
2066 }
2067
2068 async fn publish_pr_merged_status_async(
2069 &self,
2070 keys: &Keys,
2071 pr_event_id: &str,
2072 pr_author_pubkey: &str,
2073 ) -> Result<()> {
2074 let client = Client::new(keys.clone());
2075
2076 for relay in &self.relays {
2077 if let Err(e) = client.add_relay(relay).await {
2078 warn!("Failed to add relay {}: {}", relay, e);
2079 }
2080 }
2081 client.connect().await;
2082
2083 let start = std::time::Instant::now();
2085 loop {
2086 let relays = client.relays().await;
2087 let mut connected = false;
2088 for relay in relays.values() {
2089 if relay.is_connected().await {
2090 connected = true;
2091 break;
2092 }
2093 }
2094 if connected {
2095 break;
2096 }
2097 if start.elapsed() > Duration::from_secs(3) {
2098 anyhow::bail!("Failed to connect to any relay for status publish");
2099 }
2100 tokio::time::sleep(Duration::from_millis(50)).await;
2101 }
2102
2103 let tags = vec![
2104 Tag::custom(TagKind::custom("e"), vec![pr_event_id.to_string()]),
2105 Tag::custom(TagKind::custom("p"), vec![pr_author_pubkey.to_string()]),
2106 ];
2107
2108 let event = EventBuilder::new(Kind::Custom(KIND_STATUS_APPLIED), "", tags)
2109 .to_event(keys)
2110 .map_err(|e| anyhow::anyhow!("Failed to sign status event: {}", e))?;
2111
2112 let publish_result = match client.send_event(event).await {
2113 Ok(output) => {
2114 if output.success.is_empty() {
2115 Err(anyhow::anyhow!(
2116 "PR merged status was not confirmed by any relay"
2117 ))
2118 } else {
2119 info!(
2120 "Published PR merged status to {} relays",
2121 output.success.len()
2122 );
2123 Ok(())
2124 }
2125 }
2126 Err(e) => Err(anyhow::anyhow!("Failed to publish PR merged status: {}", e)),
2127 };
2128
2129 let _ = client.disconnect().await;
2130 tokio::time::sleep(Duration::from_millis(50)).await;
2131 publish_result
2132 }
2133
2134 #[allow(dead_code)]
2136 pub async fn upload_blob(&self, _hash: &str, data: &[u8]) -> Result<String> {
2137 let hash = self
2138 .blossom
2139 .upload(data)
2140 .await
2141 .map_err(|e| anyhow::anyhow!("Blossom upload failed: {}", e))?;
2142 Ok(hash)
2143 }
2144
2145 #[allow(dead_code)]
2147 pub async fn upload_blob_if_missing(&self, data: &[u8]) -> Result<(String, bool)> {
2148 self.blossom
2149 .upload_if_missing(data)
2150 .await
2151 .map_err(|e| anyhow::anyhow!("Blossom upload failed: {}", e))
2152 }
2153
2154 #[allow(dead_code)]
2156 pub async fn download_blob(&self, hash: &str) -> Result<Vec<u8>> {
2157 self.blossom
2158 .download(hash)
2159 .await
2160 .map_err(|e| anyhow::anyhow!("Blossom download failed: {}", e))
2161 }
2162
    #[allow(dead_code)]
    /// Best-effort blob download: `None` on any failure instead of an error.
    pub async fn try_download_blob(&self, hash: &str) -> Option<Vec<u8>> {
        self.blossom.try_download(hash).await
    }
2168}
2169
2170#[cfg(test)]
2171mod tests {
2172 use super::*;
2173
    /// Fixed sample pubkey (hex) shared by the unit tests below.
    const TEST_PUBKEY: &str = "4523be58d395b1b196a9b8c82b038b6895cb02b683d0c253a955068dba1facd0";
2175
    /// Shared fixture: a default daemon configuration.
    fn test_config() -> Config {
        Config::default()
    }
2179
    #[test]
    fn test_new_client() {
        let config = test_config();
        // Without a secret key the client loads relays from config but
        // cannot sign (read-only).
        let client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        assert!(!client.relays.is_empty());
        assert!(!client.can_sign());
    }
2187
    #[test]
    fn test_new_client_with_secret() {
        let config = test_config();
        // Providing a hex secret key enables signing.
        let secret = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
        let client =
            NostrClient::new(TEST_PUBKEY, Some(secret.to_string()), None, false, &config).unwrap();
        assert!(client.can_sign());
    }
2196
    #[test]
    fn test_new_client_uses_local_read_server_as_daemon_fallback() {
        let mut config = test_config();
        // Unreachable bind address forces the fallback path: the first
        // configured blossom read server becomes the local daemon URL.
        config.server.bind_address = "127.0.0.1:1".to_string();
        config.blossom.read_servers = vec!["http://127.0.0.1:19092".to_string()];

        let client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        assert_eq!(
            client.local_daemon_url.as_deref(),
            Some("http://127.0.0.1:19092")
        );
    }
2209
    #[test]
    fn test_fetch_refs_empty() {
        let config = test_config();
        // A freshly built client has no cached refs for any repo.
        let client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        let refs = client.cached_refs.get("new-repo");
        assert!(refs.is_none());
    }
2218
    #[test]
    fn test_validate_repo_publish_relays_allows_local_only_when_only_local_relays_configured() {
        // Local-only setups are valid: no public relay was ever expected.
        let configured = vec!["ws://127.0.0.1:8080/ws".to_string()];
        let connected = vec!["ws://127.0.0.1:8080/ws".to_string()];

        assert!(validate_repo_publish_relays(&configured, &connected).is_ok());
    }
2226
    #[test]
    fn test_validate_repo_publish_relays_rejects_local_only_when_public_relays_configured() {
        // A public relay was configured but only the local one confirmed:
        // the publish must be rejected with an explanatory error.
        let configured = vec![
            "ws://127.0.0.1:8080/ws".to_string(),
            "wss://relay.damus.io".to_string(),
        ];
        let connected = vec!["ws://127.0.0.1:8080/ws".to_string()];

        let err = validate_repo_publish_relays(&configured, &connected)
            .expect_err("should reject local-only publication");
        assert!(err.to_string().contains("No public relay confirmed"));
        assert!(err.to_string().contains("local relays only"));
    }
2240
    #[test]
    fn test_update_ref() {
        let config = test_config();
        let mut client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();

        // update_ref should create the per-repo map on demand and store the
        // mapping.
        client
            .update_ref("repo", "refs/heads/main", "abc123")
            .unwrap();

        let refs = client.cached_refs.get("repo").unwrap();
        assert_eq!(refs.get("refs/heads/main"), Some(&"abc123".to_string()));
    }
2253
    #[test]
    fn test_pick_latest_event_prefers_newer_timestamp() {
        let keys = Keys::generate();
        // Two events one second apart: the later created_at must win.
        let older = Timestamp::from_secs(1_700_000_000);
        let newer = Timestamp::from_secs(1_700_000_001);

        let event_old = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "old", [])
            .custom_created_at(older)
            .to_event(&keys)
            .unwrap();
        let event_new = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "new", [])
            .custom_created_at(newer)
            .to_event(&keys)
            .unwrap();

        let picked = pick_latest_event([&event_old, &event_new]).unwrap();
        assert_eq!(picked.id, event_new.id);
    }
2272
    #[test]
    fn test_pick_latest_event_breaks_ties_with_event_id() {
        let keys = Keys::generate();
        // Identical created_at: the larger event id must win so selection
        // is deterministic across runs and relays.
        let created_at = Timestamp::from_secs(1_700_000_000);

        let event_a = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "a", [])
            .custom_created_at(created_at)
            .to_event(&keys)
            .unwrap();
        let event_b = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "b", [])
            .custom_created_at(created_at)
            .to_event(&keys)
            .unwrap();

        let expected_id = if event_a.id > event_b.id {
            event_a.id
        } else {
            event_b.id
        };
        let picked = pick_latest_event([&event_a, &event_b]).unwrap();
        assert_eq!(picked.id, expected_id);
    }
2295
    #[test]
    fn test_next_replaceable_created_at_uses_now_when_existing_is_older() {
        let now = Timestamp::from_secs(1_700_000_010);
        let existing = Timestamp::from_secs(1_700_000_009);

        assert_eq!(
            next_replaceable_created_at(now, Some(existing)),
            now,
            "older repo events should not delay a new publish"
        );
    }
2307
    #[test]
    fn test_next_replaceable_created_at_bumps_same_second_events() {
        let now = Timestamp::from_secs(1_700_000_010);
        let existing = Timestamp::from_secs(1_700_000_010);

        assert_eq!(
            next_replaceable_created_at(now, Some(existing)),
            Timestamp::from_secs(1_700_000_011),
            "same-second repo publishes need a strictly newer timestamp"
        );
    }
2319
    #[test]
    fn test_pick_latest_repo_event_ignores_newer_different_d_tag() {
        let keys = Keys::generate();
        let older = Timestamp::from_secs(1_700_000_000);
        let newer = Timestamp::from_secs(1_700_000_031);

        // An event for a different repo ("iris-chat-flutter") must not be
        // picked for "iris-chat" even though it is newer: the d tag must
        // match exactly, not by prefix.
        let iris_chat = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "good",
            [
                Tag::custom(TagKind::custom("d"), vec!["iris-chat".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            ],
        )
        .custom_created_at(older)
        .to_event(&keys)
        .unwrap();

        let iris_chat_flutter = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "bad",
            [
                Tag::custom(TagKind::custom("d"), vec!["iris-chat-flutter".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            ],
        )
        .custom_created_at(newer)
        .to_event(&keys)
        .unwrap();

        let picked = pick_latest_repo_event([&iris_chat, &iris_chat_flutter], "iris-chat").unwrap();
        assert_eq!(picked.id, iris_chat.id);
    }
2353
    #[test]
    fn test_append_repo_discovery_labels_includes_git_label_and_prefixes() {
        let mut tags = vec![];
        append_repo_discovery_labels(&mut tags, "tools/hashtree");

        // Collect only the values of "l" tags for inspection.
        let values: Vec<String> = tags
            .iter()
            .filter_map(|tag| {
                let parts = tag.as_slice();
                if parts.first().map(|kind| kind.as_str()) != Some("l") {
                    return None;
                }
                parts.get(1).cloned()
            })
            .collect();

        // The git label and the path prefix ("tools") should both appear.
        assert!(values.iter().any(|value| value == LABEL_GIT));
        assert!(values.iter().any(|value| value == "tools"));
    }
2373
    #[test]
    fn test_list_git_repo_announcements_filters_dedupes_and_sorts() {
        let keys = Keys::generate();
        // Two announcements for "alpha" (the newer one must win), one for
        // "zeta/tools", and one without the git label (must be excluded).
        let alpha_old = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "old",
            [
                Tag::custom(TagKind::custom("d"), vec!["alpha".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_GIT.to_string()]),
            ],
        )
        .custom_created_at(Timestamp::from_secs(10))
        .to_event(&keys)
        .unwrap();
        let alpha_new = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "new",
            [
                Tag::custom(TagKind::custom("d"), vec!["alpha".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_GIT.to_string()]),
            ],
        )
        .custom_created_at(Timestamp::from_secs(20))
        .to_event(&keys)
        .unwrap();
        let zeta = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "zeta",
            [
                Tag::custom(TagKind::custom("d"), vec!["zeta/tools".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_GIT.to_string()]),
            ],
        )
        .custom_created_at(Timestamp::from_secs(15))
        .to_event(&keys)
        .unwrap();
        let ignored = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "ignored",
            [
                Tag::custom(TagKind::custom("d"), vec!["not-git".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            ],
        )
        .custom_created_at(Timestamp::from_secs(30))
        .to_event(&keys)
        .unwrap();

        let repos = list_git_repo_announcements(&[alpha_old, zeta, ignored, alpha_new]);
        let names: Vec<&str> = repos.iter().map(|repo| repo.repo_name.as_str()).collect();

        // Sorted by name, deduped to the latest announcement per repo.
        assert_eq!(names, vec!["alpha", "zeta/tools"]);
        assert_eq!(repos[0].created_at, Timestamp::from_secs(20));
    }
2431
    #[test]
    fn test_parse_daemon_response_to_root_data_encrypted_key() {
        // A daemon response carrying `encrypted_key` should be mapped to the
        // "encryptedKey" tag variant with the key bytes decoded from hex.
        let payload = DaemonResolveResponse {
            hash: Some("ab".repeat(32)),
            key: None,
            encrypted_key: Some("11".repeat(32)),
            self_encrypted_key: None,
            source: Some("webrtc".to_string()),
        };

        let parsed = NostrClient::parse_daemon_response_to_root_data(payload).unwrap();
        assert_eq!(parsed.root_hash, "ab".repeat(32));
        assert_eq!(parsed.key_tag_name.as_deref(), Some("encryptedKey"));
        assert!(parsed.self_encrypted_ciphertext.is_none());
        assert_eq!(parsed.encryption_key.unwrap(), [0x11; 32]);
    }
2448
    #[test]
    fn test_parse_daemon_response_to_root_data_self_encrypted() {
        // A daemon response carrying `self_encrypted_key` should surface the
        // ciphertext verbatim under the "selfEncryptedKey" variant, with no
        // plain encryption key.
        let payload = DaemonResolveResponse {
            hash: Some("cd".repeat(32)),
            key: None,
            encrypted_key: None,
            self_encrypted_key: Some("ciphertext".to_string()),
            source: Some("webrtc".to_string()),
        };

        let parsed = NostrClient::parse_daemon_response_to_root_data(payload).unwrap();
        assert_eq!(parsed.root_hash, "cd".repeat(32));
        assert_eq!(parsed.key_tag_name.as_deref(), Some("selfEncryptedKey"));
        assert_eq!(
            parsed.self_encrypted_ciphertext.as_deref(),
            Some("ciphertext")
        );
        assert!(parsed.encryption_key.is_none());
    }
2468
    #[tokio::test]
    async fn test_fetch_root_from_local_daemon_parses_response() {
        use axum::{extract::Path, routing::get, Json, Router};
        use serde_json::json;

        // Spin up a throwaway HTTP server that mimics the local daemon's
        // resolve endpoint, then point the client's daemon URL at it.
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        let app = Router::new().route(
            "/api/nostr/resolve/:pubkey/:treename",
            get(
                |Path((pubkey, treename)): Path<(String, String)>| async move {
                    Json(json!({
                        "key": format!("{}/{}", pubkey, treename),
                        "hash": "ab".repeat(32),
                        "source": "webrtc",
                        "key_tag": "22".repeat(32),
                    }))
                },
            ),
        );

        let server = tokio::spawn(async move {
            let _ = axum::serve(listener, app).await;
        });

        let config = test_config();
        let mut client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        client.local_daemon_url = Some(format!("http://{}", addr));

        let resolved = client
            .fetch_root_from_local_daemon("repo", Duration::from_secs(2))
            .await
            .unwrap();
        assert_eq!(resolved.root_hash, "ab".repeat(32));
        assert_eq!(resolved.key_tag_name.as_deref(), Some("key"));
        assert_eq!(resolved.encryption_key, Some([0x22; 32]));

        server.abort();
    }
2508
2509 #[test]
2510 fn test_stored_key_from_hex() {
2511 let secret = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
2512 let key = StoredKey::from_secret_hex(secret, Some("test".to_string())).unwrap();
2513 assert_eq!(key.secret_hex.as_deref(), Some(secret));
2514 assert_eq!(key.petname, Some("test".to_string()));
2515 assert_eq!(key.pubkey_hex.len(), 64);
2516 }
2517
2518 #[test]
2519 fn test_stored_key_from_nsec() {
2520 let nsec = "nsec1vl029mgpspedva04g90vltkh6fvh240zqtv9k0t9af8935ke9laqsnlfe5";
2522 let key = StoredKey::from_nsec(nsec, None).unwrap();
2523 assert_eq!(key.secret_hex.as_deref().map(str::len), Some(64));
2524 assert_eq!(key.pubkey_hex.len(), 64);
2525 }
2526
2527 #[test]
2528 fn test_stored_key_from_npub_is_read_only() {
2529 let npub = "npub1xdhnr9mrv47kkrn95k6cwecearydeh8e895990n3acntwvmgk2dsdeeycm";
2530 let key = StoredKey::from_npub(npub, Some("sirius".to_string())).unwrap();
2531
2532 assert!(key.secret_hex.is_none());
2533 assert_eq!(key.petname.as_deref(), Some("sirius"));
2534 assert_eq!(key.pubkey_hex.len(), 64);
2535 }
2536
2537 #[test]
2538 fn test_resolve_self_identity_ignores_read_only_aliases() {
2539 let read_only = StoredKey::from_npub(
2540 "npub1xdhnr9mrv47kkrn95k6cwecearydeh8e895990n3acntwvmgk2dsdeeycm",
2541 Some("self".to_string()),
2542 )
2543 .unwrap();
2544 let signing = StoredKey::from_nsec(
2545 "nsec1vl029mgpspedva04g90vltkh6fvh240zqtv9k0t9af8935ke9laqsnlfe5",
2546 Some("work".to_string()),
2547 )
2548 .unwrap();
2549
2550 let resolved = resolve_self_identity(&[read_only, signing.clone()]).unwrap();
2551
2552 assert_eq!(resolved.0, signing.pubkey_hex);
2553 assert_eq!(resolved.1, signing.secret_hex);
2554 }
2555
2556 #[test]
2557 fn test_resolve_identity_hex_pubkey() {
2558 let result = resolve_identity(TEST_PUBKEY);
2560 assert!(result.is_ok());
2561 let (pubkey, secret) = result.unwrap();
2562 assert_eq!(pubkey, TEST_PUBKEY);
2563 assert!(secret.is_none());
2565 }
2566
2567 #[test]
2568 fn test_resolve_identity_npub() {
2569 let pk_bytes = hex::decode(TEST_PUBKEY).unwrap();
2571 let pk = PublicKey::from_slice(&pk_bytes).unwrap();
2572 let npub = pk.to_bech32().unwrap();
2573
2574 let result = resolve_identity(&npub);
2575 assert!(result.is_ok(), "Failed: {:?}", result.err());
2576 let (pubkey, _) = result.unwrap();
2577 assert_eq!(pubkey.len(), 64);
2579 assert_eq!(pubkey, TEST_PUBKEY);
2580 }
2581
2582 #[test]
2583 fn test_format_repo_author_uses_full_npub() {
2584 let formatted = NostrClient::format_repo_author(TEST_PUBKEY);
2585 let expected = PublicKey::from_hex(TEST_PUBKEY)
2586 .unwrap()
2587 .to_bech32()
2588 .unwrap();
2589
2590 assert_eq!(formatted, expected);
2591 assert!(!formatted.contains("..."));
2592 }
2593
2594 #[test]
2595 fn test_resolve_identity_unknown_petname() {
2596 let result = resolve_identity("nonexistent_petname_xyz");
2597 assert!(result.is_err());
2598 }
2599
2600 #[test]
2602 fn test_private_key_is_nip44_encrypted_not_plaintext() {
2603 use nostr_sdk::prelude::{nip44, Keys};
2604
2605 let keys = Keys::generate();
2607 let pubkey = keys.public_key();
2608
2609 let chk_key: [u8; 32] = [
2611 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab,
2612 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67,
2613 0x89, 0xab, 0xcd, 0xef,
2614 ];
2615 let plaintext_hex = hex::encode(&chk_key);
2616
2617 let encrypted = nip44::encrypt(
2619 keys.secret_key(),
2620 &pubkey,
2621 &plaintext_hex,
2622 nip44::Version::V2,
2623 )
2624 .expect("NIP-44 encryption should succeed");
2625
2626 assert_ne!(
2628 encrypted, plaintext_hex,
2629 "NIP-44 encrypted value must differ from plaintext CHK hex"
2630 );
2631
2632 assert!(
2634 !encrypted.contains(&plaintext_hex),
2635 "Encrypted value should not contain plaintext hex"
2636 );
2637
2638 let decrypted = nip44::decrypt(keys.secret_key(), &pubkey, &encrypted)
2640 .expect("NIP-44 decryption should succeed");
2641
2642 assert_eq!(
2643 decrypted, plaintext_hex,
2644 "Decrypted value should match original plaintext hex"
2645 );
2646 }
2647
2648 #[test]
2650 fn test_encryption_modes_produce_different_values() {
2651 use nostr_sdk::prelude::{nip44, Keys};
2652
2653 let keys = Keys::generate();
2654 let pubkey = keys.public_key();
2655
2656 let chk_key: [u8; 32] = [0xaa; 32];
2658 let plaintext_hex = hex::encode(&chk_key);
2659
2660 let public_value = plaintext_hex.clone();
2662
2663 let private_value = nip44::encrypt(
2667 keys.secret_key(),
2668 &pubkey,
2669 &plaintext_hex,
2670 nip44::Version::V2,
2671 )
2672 .expect("NIP-44 encryption should succeed");
2673
2674 assert_ne!(
2676 private_value, public_value,
2677 "Private (NIP-44) value must differ from public (plaintext) value"
2678 );
2679
2680 assert!(
2682 private_value.len() != 64,
2683 "NIP-44 output should not be 64 chars like hex CHK"
2684 );
2685 }
2686
2687 fn build_test_pr_event(keys: &Keys, created_at_secs: u64) -> Event {
2688 EventBuilder::new(
2689 Kind::Custom(KIND_PULL_REQUEST),
2690 "",
2691 [Tag::custom(
2692 TagKind::custom("subject"),
2693 vec!["test pr".to_string()],
2694 )],
2695 )
2696 .custom_created_at(Timestamp::from_secs(created_at_secs))
2697 .to_event(keys)
2698 .unwrap()
2699 }
2700
2701 fn build_test_status_event(
2702 keys: &Keys,
2703 kind: u16,
2704 pr_event_id: &str,
2705 created_at_secs: u64,
2706 ) -> Event {
2707 EventBuilder::new(
2708 Kind::Custom(kind),
2709 "",
2710 [Tag::custom(
2711 TagKind::custom("e"),
2712 vec![pr_event_id.to_string()],
2713 )],
2714 )
2715 .custom_created_at(Timestamp::from_secs(created_at_secs))
2716 .to_event(keys)
2717 .unwrap()
2718 }
2719
2720 #[test]
2721 fn test_pull_request_state_from_latest_status_kind_defaults_to_open() {
2722 assert_eq!(
2723 PullRequestState::from_latest_status_kind(None),
2724 PullRequestState::Open
2725 );
2726 assert_eq!(
2727 PullRequestState::from_latest_status_kind(Some(KIND_STATUS_OPEN)),
2728 PullRequestState::Open
2729 );
2730 assert_eq!(
2731 PullRequestState::from_latest_status_kind(Some(9999)),
2732 PullRequestState::Open
2733 );
2734 }
2735
2736 #[test]
2737 fn test_pull_request_state_from_status_kind_maps_known_kinds() {
2738 assert_eq!(
2739 PullRequestState::from_status_kind(KIND_STATUS_APPLIED),
2740 Some(PullRequestState::Applied)
2741 );
2742 assert_eq!(
2743 PullRequestState::from_status_kind(KIND_STATUS_CLOSED),
2744 Some(PullRequestState::Closed)
2745 );
2746 assert_eq!(
2747 PullRequestState::from_status_kind(KIND_STATUS_DRAFT),
2748 Some(PullRequestState::Draft)
2749 );
2750 assert_eq!(PullRequestState::from_status_kind(9999), None);
2751 }
2752
2753 #[test]
2754 fn test_pull_request_state_filter_includes_only_requested_state() {
2755 assert!(PullRequestStateFilter::Open.includes(PullRequestState::Open));
2756 assert!(!PullRequestStateFilter::Open.includes(PullRequestState::Closed));
2757 assert!(PullRequestStateFilter::All.includes(PullRequestState::Open));
2758 assert!(PullRequestStateFilter::All.includes(PullRequestState::Applied));
2759 assert!(PullRequestStateFilter::All.includes(PullRequestState::Closed));
2760 assert!(PullRequestStateFilter::All.includes(PullRequestState::Draft));
2761 }
2762
2763 #[test]
2764 fn test_pull_request_state_strings_are_stable() {
2765 assert_eq!(PullRequestState::Open.as_str(), "open");
2766 assert_eq!(PullRequestState::Applied.as_str(), "applied");
2767 assert_eq!(PullRequestState::Closed.as_str(), "closed");
2768 assert_eq!(PullRequestState::Draft.as_str(), "draft");
2769
2770 assert_eq!(PullRequestStateFilter::Open.as_str(), "open");
2771 assert_eq!(PullRequestStateFilter::Applied.as_str(), "applied");
2772 assert_eq!(PullRequestStateFilter::Closed.as_str(), "closed");
2773 assert_eq!(PullRequestStateFilter::Draft.as_str(), "draft");
2774 assert_eq!(PullRequestStateFilter::All.as_str(), "all");
2775 }
2776
2777 #[test]
2778 fn test_latest_trusted_pr_status_kinds_ignores_untrusted_signers() {
2779 let repo_owner = Keys::generate();
2780 let pr_author = Keys::generate();
2781 let attacker = Keys::generate();
2782
2783 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2784 let spoofed_status = build_test_status_event(
2785 &attacker,
2786 KIND_STATUS_CLOSED,
2787 &pr_event.id.to_hex(),
2788 1_700_100_010,
2789 );
2790
2791 let statuses = latest_trusted_pr_status_kinds(
2792 &[pr_event.clone()],
2793 &[spoofed_status],
2794 &repo_owner.public_key().to_hex(),
2795 );
2796
2797 assert!(
2798 !statuses.contains_key(&pr_event.id.to_hex()),
2799 "untrusted status signer should be ignored"
2800 );
2801 }
2802
2803 #[test]
2804 fn test_latest_trusted_pr_status_kinds_accepts_pr_author() {
2805 let repo_owner = Keys::generate();
2806 let pr_author = Keys::generate();
2807
2808 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2809 let author_status = build_test_status_event(
2810 &pr_author,
2811 KIND_STATUS_CLOSED,
2812 &pr_event.id.to_hex(),
2813 1_700_100_010,
2814 );
2815
2816 let statuses = latest_trusted_pr_status_kinds(
2817 &[pr_event.clone()],
2818 &[author_status],
2819 &repo_owner.public_key().to_hex(),
2820 );
2821
2822 assert_eq!(
2823 statuses.get(&pr_event.id.to_hex()).copied(),
2824 Some(KIND_STATUS_CLOSED)
2825 );
2826 }
2827
2828 #[test]
2829 fn test_latest_trusted_pr_status_kinds_rejects_applied_from_pr_author() {
2830 let repo_owner = Keys::generate();
2831 let pr_author = Keys::generate();
2832
2833 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2834 let author_applied = build_test_status_event(
2835 &pr_author,
2836 KIND_STATUS_APPLIED,
2837 &pr_event.id.to_hex(),
2838 1_700_100_010,
2839 );
2840
2841 let statuses = latest_trusted_pr_status_kinds(
2842 &[pr_event.clone()],
2843 &[author_applied],
2844 &repo_owner.public_key().to_hex(),
2845 );
2846
2847 assert!(
2848 !statuses.contains_key(&pr_event.id.to_hex()),
2849 "PR author must not be able to self-mark applied"
2850 );
2851 }
2852
2853 #[test]
2854 fn test_latest_trusted_pr_status_kinds_accepts_repo_owner() {
2855 let repo_owner = Keys::generate();
2856 let pr_author = Keys::generate();
2857
2858 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2859 let owner_status = build_test_status_event(
2860 &repo_owner,
2861 KIND_STATUS_APPLIED,
2862 &pr_event.id.to_hex(),
2863 1_700_100_010,
2864 );
2865
2866 let statuses = latest_trusted_pr_status_kinds(
2867 &[pr_event.clone()],
2868 &[owner_status],
2869 &repo_owner.public_key().to_hex(),
2870 );
2871
2872 assert_eq!(
2873 statuses.get(&pr_event.id.to_hex()).copied(),
2874 Some(KIND_STATUS_APPLIED)
2875 );
2876 }
2877
2878 #[test]
2879 fn test_latest_trusted_pr_status_kinds_preserves_owner_applied_over_newer_author_status() {
2880 let repo_owner = Keys::generate();
2881 let pr_author = Keys::generate();
2882
2883 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2884 let owner_applied = build_test_status_event(
2885 &repo_owner,
2886 KIND_STATUS_APPLIED,
2887 &pr_event.id.to_hex(),
2888 1_700_100_010,
2889 );
2890 let newer_author_open = build_test_status_event(
2891 &pr_author,
2892 KIND_STATUS_OPEN,
2893 &pr_event.id.to_hex(),
2894 1_700_100_020,
2895 );
2896
2897 let statuses = latest_trusted_pr_status_kinds(
2898 &[pr_event.clone()],
2899 &[owner_applied, newer_author_open],
2900 &repo_owner.public_key().to_hex(),
2901 );
2902
2903 assert_eq!(
2904 statuses.get(&pr_event.id.to_hex()).copied(),
2905 Some(KIND_STATUS_APPLIED),
2906 "owner-applied status should remain authoritative even if author publishes a newer status"
2907 );
2908 }
2909
2910 #[test]
2911 fn test_latest_trusted_pr_status_kinds_ignores_newer_untrusted_status() {
2912 let repo_owner = Keys::generate();
2913 let pr_author = Keys::generate();
2914 let attacker = Keys::generate();
2915
2916 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2917 let trusted_open = build_test_status_event(
2918 &repo_owner,
2919 KIND_STATUS_OPEN,
2920 &pr_event.id.to_hex(),
2921 1_700_100_010,
2922 );
2923 let spoofed_closed = build_test_status_event(
2924 &attacker,
2925 KIND_STATUS_CLOSED,
2926 &pr_event.id.to_hex(),
2927 1_700_100_020,
2928 );
2929
2930 let statuses = latest_trusted_pr_status_kinds(
2931 &[pr_event.clone()],
2932 &[trusted_open, spoofed_closed],
2933 &repo_owner.public_key().to_hex(),
2934 );
2935
2936 assert_eq!(
2937 statuses.get(&pr_event.id.to_hex()).copied(),
2938 Some(KIND_STATUS_OPEN)
2939 );
2940 }
2941}