1use anyhow::{Context, Result};
47use futures::{SinkExt, StreamExt};
48use hashtree_blossom::BlossomClient;
49use hashtree_core::{decode_tree_node, decrypt_chk, LinkType};
50use nostr_sdk::prelude::*;
51use serde::Deserialize;
52use std::collections::HashMap;
53use std::time::Duration;
54use tokio_tungstenite::{connect_async, tungstenite::Message as WsMessage};
55use tracing::{debug, info, warn};
56
/// Nostr kind for application-specific data events; hashtree root events use this kind.
pub const KIND_APP_DATA: u16 = 30078;

/// Nostr kind for git pull-request events (NIP-34).
pub const KIND_PULL_REQUEST: u16 = 1618;
/// Pull-request status kinds (NIP-34, 1630-1633).
pub const KIND_STATUS_OPEN: u16 = 1630;
pub const KIND_STATUS_APPLIED: u16 = 1631;
pub const KIND_STATUS_CLOSED: u16 = 1632;
pub const KIND_STATUS_DRAFT: u16 = 1633;
/// Nostr kind for repository announcement events (NIP-34).
pub const KIND_REPO_ANNOUNCEMENT: u16 = 30617;

/// "l" label tag value marking hashtree-managed events.
pub const LABEL_HASHTREE: &str = "hashtree";
/// "l" label tag value marking git repositories.
pub const LABEL_GIT: &str = "git";
71
/// Lifecycle state of a pull request, derived from its latest trusted
/// status event.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PullRequestState {
    Open,
    Applied,
    Closed,
    Draft,
}
80
81impl PullRequestState {
82 pub fn as_str(self) -> &'static str {
83 match self {
84 PullRequestState::Open => "open",
85 PullRequestState::Applied => "applied",
86 PullRequestState::Closed => "closed",
87 PullRequestState::Draft => "draft",
88 }
89 }
90
91 fn from_status_kind(status_kind: u16) -> Option<Self> {
92 match status_kind {
93 KIND_STATUS_OPEN => Some(PullRequestState::Open),
94 KIND_STATUS_APPLIED => Some(PullRequestState::Applied),
95 KIND_STATUS_CLOSED => Some(PullRequestState::Closed),
96 KIND_STATUS_DRAFT => Some(PullRequestState::Draft),
97 _ => None,
98 }
99 }
100
101 fn from_latest_status_kind(status_kind: Option<u16>) -> Self {
102 status_kind
103 .and_then(Self::from_status_kind)
104 .unwrap_or(PullRequestState::Open)
105 }
106}
107
/// User-selectable filter over pull-request states; `All` disables
/// filtering. Defaults to `Open`.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum PullRequestStateFilter {
    #[default]
    Open,
    Applied,
    Closed,
    Draft,
    All,
}
118
119impl PullRequestStateFilter {
120 pub fn as_str(self) -> &'static str {
121 match self {
122 PullRequestStateFilter::Open => "open",
123 PullRequestStateFilter::Applied => "applied",
124 PullRequestStateFilter::Closed => "closed",
125 PullRequestStateFilter::Draft => "draft",
126 PullRequestStateFilter::All => "all",
127 }
128 }
129
130 fn includes(self, state: PullRequestState) -> bool {
131 match self {
132 PullRequestStateFilter::All => true,
133 PullRequestStateFilter::Open => state == PullRequestState::Open,
134 PullRequestStateFilter::Applied => state == PullRequestState::Applied,
135 PullRequestStateFilter::Closed => state == PullRequestState::Closed,
136 PullRequestStateFilter::Draft => state == PullRequestState::Draft,
137 }
138 }
139}
140
/// Summary of one pull request for list views.
#[derive(Debug, Clone)]
pub struct PullRequestListItem {
    /// Hex id of the PR event.
    pub event_id: String,
    /// Hex pubkey of the PR author.
    pub author_pubkey: String,
    /// State derived from the latest trusted status event.
    pub state: PullRequestState,
    /// Optional subject line.
    pub subject: Option<String>,
    /// Tip commit of the proposed change, when known.
    pub commit_tip: Option<String>,
    /// Source branch name, when known.
    pub branch: Option<String>,
    /// Target branch name, when known.
    pub target_branch: Option<String>,
    /// Event creation timestamp.
    pub created_at: u64,
}
153
/// Query each relay directly over a raw websocket with a single REQ and
/// collect events until EOSE (or CLOSED / timeout), deduplicating by event
/// id across relays. Per-relay failures are logged at debug level and
/// skipped; this function never fails as a whole.
async fn fetch_events_via_raw_relay_query(
    relays: &[String],
    filter: Filter,
    timeout: Duration,
) -> Vec<Event> {
    // One shared REQ payload, identified by a fresh subscription id.
    let request_json = ClientMessage::req(SubscriptionId::generate(), vec![filter]).as_json();
    let mut events_by_id = HashMap::<String, Event>::new();

    for relay_url in relays {
        // The whole connect/send/read cycle for one relay is bounded by `timeout`.
        let relay_events = match tokio::time::timeout(timeout, async {
            let (mut ws, _) = connect_async(relay_url).await?;
            ws.send(WsMessage::Text(request_json.clone())).await?;

            let mut relay_events = Vec::new();
            while let Some(message) = ws.next().await {
                let message = message?;
                // Only text frames carry nostr protocol messages; skip the rest.
                let WsMessage::Text(text) = message else {
                    continue;
                };

                match RelayMessage::from_json(text.as_str()) {
                    Ok(RelayMessage::Event { event, .. }) => relay_events.push(*event),
                    // EOSE: the relay's stored events are exhausted.
                    Ok(RelayMessage::EndOfStoredEvents(_)) => break,
                    Ok(RelayMessage::Closed { message, .. }) => {
                        debug!("Raw relay PR query closed by {}: {}", relay_url, message);
                        break;
                    }
                    Ok(_) => {}
                    Err(err) => {
                        debug!(
                            "Failed to parse raw relay response from {}: {}",
                            relay_url, err
                        );
                    }
                }
            }

            let _ = ws.close(None).await;
            Ok::<Vec<Event>, anyhow::Error>(relay_events)
        })
        .await
        {
            Ok(Ok(events)) => events,
            Ok(Err(err)) => {
                debug!("Raw relay PR query failed for {}: {}", relay_url, err);
                continue;
            }
            Err(_) => {
                debug!("Raw relay PR query timed out for {}", relay_url);
                continue;
            }
        };

        // Deduplicate across relays by hex event id.
        for event in relay_events {
            events_by_id.insert(event.id.to_hex(), event);
        }
    }

    events_by_id.into_values().collect()
}
214
/// Result of a refs fetch: (refs map, optional root hash, optional 32-byte encryption key).
type FetchedRefs = (HashMap<String, String>, Option<String>, Option<[u8; 32]>);
216
/// The newest announcement event observed for one git repo.
#[derive(Debug, Clone, PartialEq, Eq)]
struct GitRepoAnnouncement {
    repo_name: String,
    created_at: Timestamp,
    event_id: EventId,
}
223
/// An identity loaded from the keys or aliases file.
#[derive(Debug, Clone)]
pub struct StoredKey {
    /// Secret key as hex; present only for identities we can sign with.
    pub secret_hex: Option<String>,
    /// x-only public key as hex (always present).
    pub pubkey_hex: String,
    /// Optional human-readable alias for this identity.
    pub petname: Option<String>,
}
234
235impl StoredKey {
236 pub fn from_secret_hex(secret_hex: &str, petname: Option<String>) -> Result<Self> {
238 use secp256k1::{Secp256k1, SecretKey};
239
240 let sk_bytes = hex::decode(secret_hex).context("Invalid hex in secret key")?;
241 let sk = SecretKey::from_slice(&sk_bytes).context("Invalid secret key")?;
242 let secp = Secp256k1::new();
243 let pk = sk.x_only_public_key(&secp).0;
244 let pubkey_hex = hex::encode(pk.serialize());
245
246 Ok(Self {
247 secret_hex: Some(secret_hex.to_string()),
248 pubkey_hex,
249 petname,
250 })
251 }
252
253 pub fn from_nsec(nsec: &str, petname: Option<String>) -> Result<Self> {
255 let secret_key =
256 SecretKey::parse(nsec).map_err(|e| anyhow::anyhow!("Invalid nsec format: {}", e))?;
257 let secret_hex = hex::encode(secret_key.to_secret_bytes());
258 Self::from_secret_hex(&secret_hex, petname)
259 }
260
261 pub fn from_pubkey_hex(pubkey_hex: &str, petname: Option<String>) -> Result<Self> {
263 let pubkey = PublicKey::from_hex(pubkey_hex)
264 .map_err(|e| anyhow::anyhow!("Invalid pubkey hex: {}", e))?;
265
266 Ok(Self {
267 secret_hex: None,
268 pubkey_hex: hex::encode(pubkey.to_bytes()),
269 petname,
270 })
271 }
272
273 pub fn from_npub(npub: &str, petname: Option<String>) -> Result<Self> {
275 let pubkey =
276 PublicKey::parse(npub).map_err(|e| anyhow::anyhow!("Invalid npub format: {}", e))?;
277
278 Ok(Self {
279 secret_hex: None,
280 pubkey_hex: hex::encode(pubkey.to_bytes()),
281 petname,
282 })
283 }
284}
285
/// Which identity file an entry came from; determines how entries parse.
#[derive(Clone, Copy)]
enum IdentityFileKind {
    // Keys file: may contain nsec / hex secrets or npubs.
    Keys,
    // Aliases file: public identities only (npub / hex pubkey).
    Aliases,
}
291
292fn ensure_aliases_file_hint() {
293 let aliases_path = hashtree_config::get_aliases_path();
294 if aliases_path.exists() {
295 return;
296 }
297
298 let Some(parent) = aliases_path.parent() else {
299 return;
300 };
301
302 if !parent.exists() {
303 return;
304 }
305
306 let template = concat!(
307 "# Public read-only aliases for repos you clone or fetch.\n",
308 "# Format: npub1... alias\n",
309 "# Example:\n",
310 "# npub1xdhnr9mrv47kkrn95k6cwecearydeh8e895990n3acntwvmgk2dsdeeycm sirius\n",
311 );
312
313 let _ = std::fs::OpenOptions::new()
314 .write(true)
315 .create_new(true)
316 .open(&aliases_path)
317 .and_then(|mut file| std::io::Write::write_all(&mut file, template.as_bytes()));
318}
319
320fn parse_identity_entry(
321 raw: &str,
322 petname: Option<String>,
323 kind: IdentityFileKind,
324) -> Option<StoredKey> {
325 let key = match kind {
326 IdentityFileKind::Keys => {
327 if raw.starts_with("nsec1") {
328 StoredKey::from_nsec(raw, petname)
329 } else if raw.starts_with("npub1") {
330 StoredKey::from_npub(raw, petname)
331 } else if raw.len() == 64 {
332 StoredKey::from_secret_hex(raw, petname)
333 } else {
334 return None;
335 }
336 }
337 IdentityFileKind::Aliases => {
338 if raw.starts_with("npub1") {
339 StoredKey::from_npub(raw, petname)
340 } else if raw.len() == 64 {
341 StoredKey::from_pubkey_hex(raw, petname)
342 } else {
343 return None;
344 }
345 }
346 };
347
348 key.ok()
349}
350
351fn load_identities_from_path(path: &std::path::Path, kind: IdentityFileKind) -> Vec<StoredKey> {
352 let mut keys = Vec::new();
353
354 if let Ok(content) = std::fs::read_to_string(path) {
355 for entry in hashtree_config::parse_keys_file(&content) {
356 if let Some(key) = parse_identity_entry(&entry.secret, entry.alias, kind) {
357 debug!(
358 "Loaded identity: pubkey={}, petname={:?}, has_secret={}",
359 key.pubkey_hex,
360 key.petname,
361 key.secret_hex.is_some()
362 );
363 keys.push(key);
364 }
365 }
366 }
367
368 keys
369}
370
371fn resolve_self_identity(keys: &[StoredKey]) -> Option<(String, Option<String>)> {
372 keys.iter()
373 .find(|k| k.petname.as_deref() == Some("self") && k.secret_hex.is_some())
374 .or_else(|| {
375 keys.iter()
376 .find(|k| k.petname.as_deref() == Some("default") && k.secret_hex.is_some())
377 })
378 .or_else(|| keys.iter().find(|k| k.secret_hex.is_some()))
379 .map(|key| (key.pubkey_hex.clone(), key.secret_hex.clone()))
380}
381
382pub fn load_keys() -> Vec<StoredKey> {
384 ensure_aliases_file_hint();
385
386 let mut keys =
387 load_identities_from_path(&hashtree_config::get_keys_path(), IdentityFileKind::Keys);
388 keys.extend(load_identities_from_path(
389 &hashtree_config::get_aliases_path(),
390 IdentityFileKind::Aliases,
391 ));
392
393 keys
394}
395
396pub fn resolve_identity(identifier: &str) -> Result<(String, Option<String>)> {
403 let keys = load_keys();
404
405 if identifier == "self" {
407 if let Some(resolved) = resolve_self_identity(&keys) {
408 return Ok(resolved);
409 }
410 let new_key = generate_and_save_key("self")?;
412 info!("Generated new identity: npub1{}", &new_key.pubkey_hex[..12]);
413 return Ok((new_key.pubkey_hex, new_key.secret_hex));
414 }
415
416 for key in &keys {
418 if key.petname.as_deref() == Some(identifier) {
419 return Ok((key.pubkey_hex.clone(), key.secret_hex.clone()));
420 }
421 }
422
423 if identifier.starts_with("npub1") {
425 let pk = PublicKey::parse(identifier)
426 .map_err(|e| anyhow::anyhow!("Invalid npub format: {}", e))?;
427 let pubkey_hex = hex::encode(pk.to_bytes());
428
429 let secret = keys
431 .iter()
432 .find(|k| k.pubkey_hex == pubkey_hex)
433 .and_then(|k| k.secret_hex.clone());
434
435 return Ok((pubkey_hex, secret));
436 }
437
438 if identifier.len() == 64 && hex::decode(identifier).is_ok() {
440 let secret = keys
441 .iter()
442 .find(|k| k.pubkey_hex == identifier)
443 .and_then(|k| k.secret_hex.clone());
444
445 return Ok((identifier.to_string(), secret));
446 }
447
448 anyhow::bail!(
450 "Unknown identity '{}'. Add it to ~/.hashtree/aliases (preferred) or ~/.hashtree/keys, or use a pubkey/npub.",
451 identifier
452 )
453}
454
455fn generate_and_save_key(petname: &str) -> Result<StoredKey> {
457 use std::fs::{self, OpenOptions};
458 use std::io::Write;
459
460 let keys = nostr_sdk::Keys::generate();
462 let secret_hex = hex::encode(keys.secret_key().to_secret_bytes());
463 let pubkey_hex = hex::encode(keys.public_key().to_bytes());
464
465 let keys_path = hashtree_config::get_keys_path();
467 if let Some(parent) = keys_path.parent() {
468 fs::create_dir_all(parent)?;
469 }
470 ensure_aliases_file_hint();
471
472 let mut file = OpenOptions::new()
474 .create(true)
475 .append(true)
476 .open(&keys_path)?;
477
478 let nsec = keys
480 .secret_key()
481 .to_bech32()
482 .map_err(|e| anyhow::anyhow!("Failed to encode nsec: {}", e))?;
483 writeln!(file, "{} {}", nsec, petname)?;
484
485 info!(
486 "Saved new key to {:?} with petname '{}'",
487 keys_path, petname
488 );
489
490 Ok(StoredKey {
491 secret_hex: Some(secret_hex),
492 pubkey_hex,
493 petname: Some(petname.to_string()),
494 })
495}
496
497use hashtree_config::Config;
498
499fn pick_latest_event<'a, I>(events: I) -> Option<&'a Event>
500where
501 I: IntoIterator<Item = &'a Event>,
502{
503 events
505 .into_iter()
506 .max_by_key(|event| (event.created_at, event.id))
507}
508
509fn is_matching_repo_event(event: &Event, repo_name: &str) -> bool {
510 let has_hashtree_label = event.tags.iter().any(|tag| {
511 let slice = tag.as_slice();
512 slice.len() >= 2 && slice[0].as_str() == "l" && slice[1].as_str() == LABEL_HASHTREE
513 });
514
515 if !has_hashtree_label {
516 return false;
517 }
518
519 event.tags.iter().any(|tag| {
520 let slice = tag.as_slice();
521 slice.len() >= 2 && slice[0].as_str() == "d" && slice[1].as_str() == repo_name
522 })
523}
524
525fn pick_latest_repo_event<'a, I>(events: I, repo_name: &str) -> Option<&'a Event>
526where
527 I: IntoIterator<Item = &'a Event>,
528{
529 pick_latest_event(
530 events
531 .into_iter()
532 .filter(|event| is_matching_repo_event(event, repo_name)),
533 )
534}
535
536fn git_repo_name(event: &Event) -> Option<&str> {
537 let has_hashtree_label = event.tags.iter().any(|tag| {
538 let slice = tag.as_slice();
539 slice.len() >= 2 && slice[0].as_str() == "l" && slice[1].as_str() == LABEL_HASHTREE
540 });
541 let has_git_label = event.tags.iter().any(|tag| {
542 let slice = tag.as_slice();
543 slice.len() >= 2 && slice[0].as_str() == "l" && slice[1].as_str() == LABEL_GIT
544 });
545 if !has_hashtree_label || !has_git_label {
546 return None;
547 }
548
549 event.tags.iter().find_map(|tag| {
550 let slice = tag.as_slice();
551 if slice.len() < 2 || slice[0].as_str() != "d" {
552 return None;
553 }
554 let repo_name = slice[1].as_str();
555 if repo_name.is_empty() {
556 None
557 } else {
558 Some(repo_name)
559 }
560 })
561}
562
563fn list_git_repo_announcements(events: &[Event]) -> Vec<GitRepoAnnouncement> {
564 let mut latest_by_repo: HashMap<String, (Timestamp, EventId)> = HashMap::new();
565
566 for event in events {
567 let Some(repo_name) = git_repo_name(event) else {
568 continue;
569 };
570
571 let entry = latest_by_repo
572 .entry(repo_name.to_string())
573 .or_insert((event.created_at, event.id));
574 if (event.created_at, event.id) > (entry.0, entry.1) {
575 *entry = (event.created_at, event.id);
576 }
577 }
578
579 let mut repos: Vec<GitRepoAnnouncement> = latest_by_repo
580 .into_iter()
581 .map(|(repo_name, (created_at, event_id))| GitRepoAnnouncement {
582 repo_name,
583 created_at,
584 event_id,
585 })
586 .collect();
587 repos.sort_by(|left, right| left.repo_name.cmp(&right.repo_name));
588 repos
589}
590
/// Relay filter listing every git-labelled app-data event by `author`
/// (capped at 500 events).
fn build_git_repo_list_filter(author: PublicKey) -> Filter {
    Filter::new()
        .kind(Kind::Custom(KIND_APP_DATA))
        .author(author)
        .custom_tag(SingleLetterTag::lowercase(Alphabet::L), vec![LABEL_GIT])
        .limit(500)
}
598
/// Relay filter for one repo's root events by `author`: app-data kind,
/// `d` tag = repo name, `l` tag = "hashtree" (capped at 50 events).
fn build_repo_event_filter(author: PublicKey, repo_name: &str) -> Filter {
    Filter::new()
        .kind(Kind::Custom(KIND_APP_DATA))
        .author(author)
        .custom_tag(SingleLetterTag::lowercase(Alphabet::D), vec![repo_name])
        .custom_tag(
            SingleLetterTag::lowercase(Alphabet::L),
            vec![LABEL_HASHTREE],
        )
        .limit(50)
}
610
611fn next_replaceable_created_at(now: Timestamp, latest_existing: Option<Timestamp>) -> Timestamp {
612 match latest_existing {
613 Some(latest) if latest >= now => Timestamp::from_secs(latest.as_u64().saturating_add(1)),
614 _ => now,
615 }
616}
617
618async fn latest_repo_event_created_at(
619 client: &Client,
620 author: PublicKey,
621 repo_name: &str,
622 timeout: Duration,
623) -> Option<Timestamp> {
624 let events = client
625 .get_events_of(
626 vec![build_repo_event_filter(author, repo_name)],
627 EventSource::relays(Some(timeout)),
628 )
629 .await
630 .ok()?;
631 pick_latest_repo_event(events.iter(), repo_name).map(|event| event.created_at)
632}
633
634fn append_repo_discovery_labels(tags: &mut Vec<Tag>, repo_name: &str) {
635 tags.push(Tag::custom(
636 TagKind::custom("l"),
637 vec![LABEL_GIT.to_string()],
638 ));
639
640 let parts: Vec<&str> = repo_name.split('/').collect();
643 for i in 1..parts.len() {
644 let prefix = parts[..i].join("/");
645 tags.push(Tag::custom(TagKind::custom("l"), vec![prefix]));
646 }
647}
648
/// Extract the host portion of a relay URL, tolerating ws/wss/http/https
/// schemes, paths, ports, and bracketed IPv6 literals. Returns `None` when
/// no non-empty host can be found.
fn relay_host(url: &str) -> Option<&str> {
    let without_scheme = ["ws://", "wss://", "http://", "https://"]
        .iter()
        .find_map(|scheme| url.strip_prefix(scheme))
        .unwrap_or(url);

    let authority = without_scheme.split('/').next().unwrap_or(without_scheme);
    if authority.is_empty() {
        return None;
    }

    // Bracketed IPv6 literal: "[::1]:8080" -> "::1".
    if let Some(rest) = authority.strip_prefix('[') {
        return rest.split(']').next().filter(|host| !host.is_empty());
    }

    // "host:port" or bare host.
    authority
        .split(':')
        .next()
        .map(str::trim)
        .filter(|host| !host.is_empty())
}
671
672fn is_local_relay_url(url: &str) -> bool {
673 relay_host(url).is_some_and(|host| {
674 host.eq_ignore_ascii_case("localhost")
675 || host == "127.0.0.1"
676 || host == "::1"
677 || host.starts_with("127.")
678 })
679}
680
681fn has_non_local_relay(urls: &[String]) -> bool {
682 urls.iter().any(|url| !is_local_relay_url(url))
683}
684
/// Check that a repo announcement reached relays another machine could use:
/// at least one relay confirmed, and — when public relays are configured —
/// at least one *public* (non-local) relay confirmed. A local-only
/// confirmation would leave the repo undiscoverable from other machines.
fn validate_repo_publish_relays(configured: &[String], connected: &[String]) -> Result<()> {
    if connected.is_empty() {
        anyhow::bail!(
            "No relay confirmed repo publication. Another machine will not discover this repo via htree://<npub>/... Check [nostr].relays in ~/.hashtree/config.toml."
        );
    }

    if has_non_local_relay(configured) && !has_non_local_relay(connected) {
        anyhow::bail!(
            "No public relay confirmed repo publication; local relays only: {}. Another machine will not discover this repo via htree://<npub>/... Check [nostr].relays in ~/.hashtree/config.toml.",
            connected.join(", ")
        );
    }

    Ok(())
}
701
/// For each PR event, determine the latest *trusted* status kind.
///
/// Trust rules: an "applied" status is trusted only when signed by the repo
/// owner; any other status is trusted when signed by either the PR author or
/// the repo owner. Among a PR's trusted statuses, an applied status wins
/// even over newer non-applied statuses; otherwise the newest trusted status
/// is used. Returns a map of PR event id (hex) -> status kind.
fn latest_trusted_pr_status_kinds(
    pr_events: &[Event],
    status_events: &[Event],
    repo_owner_pubkey: &str,
) -> HashMap<String, u16> {
    // PR event id (hex) -> PR author pubkey (hex).
    let pr_authors: HashMap<String, String> = pr_events
        .iter()
        .map(|event| (event.id.to_hex(), event.pubkey.to_hex()))
        .collect();

    let mut trusted_statuses: HashMap<String, Vec<&Event>> = HashMap::new();
    for status in status_events {
        let signer_pubkey = status.pubkey.to_hex();
        // One status event may reference several PRs through multiple "e" tags.
        for tag in status.tags.iter() {
            let slice = tag.as_slice();
            if slice.len() < 2 || slice[0].as_str() != "e" {
                continue;
            }

            let pr_id = slice[1].to_string();
            // Ignore statuses that point at PRs we do not know about.
            let Some(pr_author_pubkey) = pr_authors.get(&pr_id) else {
                continue;
            };

            let trusted = if status.kind.as_u16() == KIND_STATUS_APPLIED {
                // Only the repo owner may mark a PR as applied.
                signer_pubkey == repo_owner_pubkey
            } else {
                signer_pubkey == *pr_author_pubkey || signer_pubkey == repo_owner_pubkey
            };
            if trusted {
                trusted_statuses.entry(pr_id).or_default().push(status);
            }
        }
    }

    let mut latest_status = HashMap::new();
    for (pr_id, events) in trusted_statuses {
        // "Applied" is sticky: prefer the latest applied status even when a
        // newer non-applied status exists.
        if let Some(applied) = pick_latest_event(
            events
                .iter()
                .copied()
                .filter(|event| event.kind.as_u16() == KIND_STATUS_APPLIED),
        ) {
            latest_status.insert(pr_id, applied.kind.as_u16());
        } else if let Some(latest) = pick_latest_event(events.iter().copied()) {
            latest_status.insert(pr_id, latest.kind.as_u16());
        }
    }

    latest_status
}
756
/// Outcome of a relay publish attempt.
#[derive(Debug, Clone)]
pub struct RelayResult {
    /// All relay URLs configured for the attempt.
    #[allow(dead_code)]
    pub configured: Vec<String>,
    /// Relays that confirmed the publication.
    pub connected: Vec<String>,
    /// Relays that failed or never confirmed.
    pub failed: Vec<String>,
}
768
/// Outcome of an upload to the configured Blossom servers.
#[derive(Debug, Clone)]
pub struct BlossomResult {
    /// All Blossom server URLs configured for the attempt.
    #[allow(dead_code)]
    pub configured: Vec<String>,
    /// Servers where the upload succeeded.
    pub succeeded: Vec<String>,
    /// Servers where the upload failed.
    pub failed: Vec<String>,
}
780
/// Client that resolves hashtree repos via nostr relays, an optional local
/// daemon, and Blossom blob servers, with per-repo in-memory caches.
pub struct NostrClient {
    /// Hex pubkey whose repo events are queried.
    pubkey: String,
    /// Signing keys; `None` for read-only use.
    keys: Option<Keys>,
    /// Relay URLs to query.
    relays: Vec<String>,
    /// Blob transport client.
    blossom: BlossomClient,
    /// repo name -> refs map cache.
    cached_refs: HashMap<String, HashMap<String, String>>,
    /// repo name -> root hash cache.
    cached_root_hash: HashMap<String, String>,
    /// repo name -> encryption key cache.
    cached_encryption_key: HashMap<String, [u8; 32]>,
    /// Secret used to unmask "encryptedKey" tags (XOR mask from the URL).
    url_secret: Option<[u8; 32]>,
    /// Whether the repo was addressed as private (author-only).
    is_private: bool,
    /// Base URL of a local hashtree daemon, when one was detected.
    local_daemon_url: Option<String>,
}
802
/// Root pointer parsed from a repo event or daemon response.
#[derive(Debug, Clone, Default)]
struct RootEventData {
    /// Root hash (from the "hash" tag or event content).
    root_hash: String,
    /// 32-byte key from a "key"/"encryptedKey" tag (may still be masked).
    encryption_key: Option<[u8; 32]>,
    /// Which tag supplied key material: "key", "encryptedKey", or
    /// "selfEncryptedKey", if any.
    key_tag_name: Option<String>,
    /// Ciphertext of a "selfEncryptedKey" tag; only the author can use it.
    self_encrypted_ciphertext: Option<String>,
}
810
/// JSON body returned by the local daemon's /api/nostr/resolve endpoint.
#[derive(Debug, Deserialize)]
struct DaemonResolveResponse {
    /// Root hash; absent or empty means unresolved.
    hash: Option<String>,
    // NOTE(review): the JSON field "key_tag" is mapped onto `key`, which is
    // later treated as plaintext key hex — confirm the daemon really emits
    // the key under "key_tag" rather than "key".
    #[serde(default, rename = "key_tag")]
    key: Option<String>,
    #[serde(default, rename = "encryptedKey")]
    encrypted_key: Option<String>,
    #[serde(default, rename = "selfEncryptedKey")]
    self_encrypted_key: Option<String>,
    /// Where the daemon resolved from; used for debug logging only.
    #[serde(default)]
    source: Option<String>,
}
823
824impl NostrClient {
    /// Build a client for `pubkey`'s repos.
    ///
    /// `secret_key` (hex) enables signing and falls back to the
    /// NOSTR_SECRET_KEY env var; `url_secret` unmasks link-visible repo keys;
    /// `is_private` marks author-only access. Relays, the Blossom client, and
    /// an optional local-daemon URL are derived from `config`.
    pub fn new(
        pubkey: &str,
        secret_key: Option<String>,
        url_secret: Option<[u8; 32]>,
        is_private: bool,
        config: &Config,
    ) -> Result<Self> {
        // Install the rustls crypto provider; ignore "already installed".
        let _ = rustls::crypto::ring::default_provider().install_default();

        let secret_key = secret_key.or_else(|| std::env::var("NOSTR_SECRET_KEY").ok());

        let keys = if let Some(ref secret_hex) = secret_key {
            let secret_bytes = hex::decode(secret_hex).context("Invalid secret key hex")?;
            let secret = nostr::SecretKey::from_slice(&secret_bytes)
                .map_err(|e| anyhow::anyhow!("Invalid secret key: {}", e))?;
            Some(Keys::new(secret))
        } else {
            None
        };

        // Blossom requests need some signing key; use a throwaway one when
        // the caller has no secret.
        let blossom_keys = keys.clone().unwrap_or_else(Keys::generate);
        let blossom = BlossomClient::new(blossom_keys).with_timeout(Duration::from_secs(30));

        tracing::info!(
            "BlossomClient created with read_servers: {:?}, write_servers: {:?}",
            blossom.read_servers(),
            blossom.write_servers()
        );

        let relays = hashtree_config::resolve_relays(
            &config.nostr.relays,
            Some(config.server.bind_address.as_str()),
        );
        // Prefer a detected local daemon; otherwise fall back to a loopback
        // Blossom read server, if one is configured.
        let local_daemon_url =
            hashtree_config::detect_local_daemon_url(Some(config.server.bind_address.as_str()))
                .or_else(|| {
                    config
                        .blossom
                        .read_servers
                        .iter()
                        .find(|url| {
                            url.starts_with("http://127.0.0.1:")
                                || url.starts_with("http://localhost:")
                        })
                        .cloned()
                });

        Ok(Self {
            pubkey: pubkey.to_string(),
            keys,
            relays,
            blossom,
            cached_refs: HashMap::new(),
            cached_root_hash: HashMap::new(),
            cached_encryption_key: HashMap::new(),
            url_secret,
            is_private,
            local_daemon_url,
        })
    }
891
892 fn format_repo_author(pubkey_hex: &str) -> String {
893 PublicKey::from_hex(pubkey_hex)
894 .ok()
895 .and_then(|pk| pk.to_bech32().ok())
896 .unwrap_or_else(|| pubkey_hex.to_string())
897 }
898
    /// Whether this client holds a secret key and can sign events.
    #[allow(dead_code)]
    pub fn can_sign(&self) -> bool {
        self.keys.is_some()
    }
904
905 pub fn list_repos(&self) -> Result<Vec<String>> {
906 let rt = tokio::runtime::Builder::new_multi_thread()
907 .enable_all()
908 .build()
909 .context("Failed to create tokio runtime")?;
910
911 let result = rt.block_on(self.list_repos_async());
912 rt.shutdown_timeout(Duration::from_millis(500));
913 result
914 }
915
    /// Fetch the list of this pubkey's git repo names from the relays.
    ///
    /// Uses a throwaway relay client: connects, waits up to 2s for at least
    /// one relay, queries with a 3s budget, and always disconnects before
    /// returning.
    pub async fn list_repos_async(&self) -> Result<Vec<String>> {
        let client = Client::default();

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
            }
        }
        client.connect().await;

        // Poll until at least one relay reports connected, or give up at 2s.
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut connected = false;
            for relay in relays.values() {
                if relay.is_connected().await {
                    connected = true;
                    break;
                }
            }
            if connected {
                break;
            }
            if start.elapsed() > Duration::from_secs(2) {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to connect to any relay while listing repos"
                ));
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        let author = PublicKey::from_hex(&self.pubkey)
            .map_err(|e| anyhow::anyhow!("Invalid pubkey: {}", e))?;
        let filter = build_git_repo_list_filter(author);

        let events = match tokio::time::timeout(
            Duration::from_secs(3),
            client.get_events_of(vec![filter], EventSource::relays(None)),
        )
        .await
        {
            Ok(Ok(events)) => events,
            Ok(Err(e)) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to fetch git repo events from relays: {}",
                    e
                ));
            }
            Err(_) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Timed out fetching git repo events from relays"
                ));
            }
        };

        let _ = client.disconnect().await;

        // Collapse to the newest announcement per repo, sorted by name.
        Ok(list_git_repo_announcements(&events)
            .into_iter()
            .map(|repo| repo.repo_name)
            .collect())
    }
981
982 pub fn fetch_refs(&mut self, repo_name: &str) -> Result<HashMap<String, String>> {
985 let (refs, _, _) = self.fetch_refs_with_timeout(repo_name, 10)?;
986 Ok(refs)
987 }
988
989 #[allow(dead_code)]
992 pub fn fetch_refs_quick(&mut self, repo_name: &str) -> Result<HashMap<String, String>> {
993 let (refs, _, _) = self.fetch_refs_with_timeout(repo_name, 3)?;
994 Ok(refs)
995 }
996
    /// Like `fetch_refs`, but also returns the root hash and encryption key
    /// when present (default 10s timeout).
    #[allow(dead_code)]
    pub fn fetch_refs_with_root(&mut self, repo_name: &str) -> Result<FetchedRefs> {
        self.fetch_refs_with_timeout(repo_name, 10)
    }
1003
    /// Resolve refs (plus root hash and encryption key) for `repo_name`,
    /// serving from the in-memory caches when possible and otherwise
    /// blocking on the async fetch via a private runtime.
    fn fetch_refs_with_timeout(
        &mut self,
        repo_name: &str,
        timeout_secs: u64,
    ) -> Result<FetchedRefs> {
        debug!(
            "Fetching refs for {} from {} (timeout {}s)",
            repo_name, self.pubkey, timeout_secs
        );

        // Cache hit: refs, root, and key were stored by a previous fetch.
        if let Some(refs) = self.cached_refs.get(repo_name) {
            let root = self.cached_root_hash.get(repo_name).cloned();
            let key = self.cached_encryption_key.get(repo_name).cloned();
            return Ok((refs.clone(), root, key));
        }

        let rt = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .context("Failed to create tokio runtime")?;

        let (refs, root_hash, encryption_key) =
            rt.block_on(self.fetch_refs_async_with_timeout(repo_name, timeout_secs))?;
        // Populate the caches for subsequent calls in this process.
        self.cached_refs.insert(repo_name.to_string(), refs.clone());
        if let Some(ref root) = root_hash {
            self.cached_root_hash
                .insert(repo_name.to_string(), root.clone());
        }
        if let Some(key) = encryption_key {
            self.cached_encryption_key
                .insert(repo_name.to_string(), key);
        }
        Ok((refs, root_hash, encryption_key))
    }
1042
    /// Parse a repo root event into `RootEventData`.
    ///
    /// The root hash comes from the "hash" tag, falling back to the event
    /// content. Key material comes from the first matching tag:
    /// "selfEncryptedKey" (kept as ciphertext for the author), or
    /// "key"/"encryptedKey" decoded as 32-byte hex (ignored when malformed).
    fn parse_root_event_data_from_event(event: &Event) -> RootEventData {
        let root_hash = event
            .tags
            .iter()
            .find(|t| t.as_slice().len() >= 2 && t.as_slice()[0].as_str() == "hash")
            .map(|t| t.as_slice()[1].to_string())
            .unwrap_or_else(|| event.content.to_string());

        let (encryption_key, key_tag_name, self_encrypted_ciphertext) = event
            .tags
            .iter()
            .find_map(|t| {
                let slice = t.as_slice();
                if slice.len() < 2 {
                    return None;
                }
                let tag_name = slice[0].as_str();
                let tag_value = slice[1].to_string();
                if tag_name == "selfEncryptedKey" {
                    // Author-only key: keep the ciphertext as-is.
                    return Some((None, Some(tag_name.to_string()), Some(tag_value)));
                }
                if tag_name == "key" || tag_name == "encryptedKey" {
                    // Accept only well-formed 32-byte hex keys.
                    if let Ok(bytes) = hex::decode(&tag_value) {
                        if bytes.len() == 32 {
                            let mut key = [0u8; 32];
                            key.copy_from_slice(&bytes);
                            return Some((Some(key), Some(tag_name.to_string()), None));
                        }
                    }
                }
                None
            })
            .unwrap_or((None, None, None));

        RootEventData {
            root_hash,
            encryption_key,
            key_tag_name,
            self_encrypted_ciphertext,
        }
    }
1084
1085 fn parse_daemon_response_to_root_data(
1086 response: DaemonResolveResponse,
1087 ) -> Option<RootEventData> {
1088 let root_hash = response.hash?;
1089 if root_hash.is_empty() {
1090 return None;
1091 }
1092
1093 let mut data = RootEventData {
1094 root_hash,
1095 encryption_key: None,
1096 key_tag_name: None,
1097 self_encrypted_ciphertext: None,
1098 };
1099
1100 if let Some(ciphertext) = response.self_encrypted_key {
1101 data.key_tag_name = Some("selfEncryptedKey".to_string());
1102 data.self_encrypted_ciphertext = Some(ciphertext);
1103 return Some(data);
1104 }
1105
1106 let (tag_name, tag_value) = if let Some(v) = response.encrypted_key {
1107 ("encryptedKey", v)
1108 } else if let Some(v) = response.key {
1109 ("key", v)
1110 } else {
1111 return Some(data);
1112 };
1113
1114 if let Ok(bytes) = hex::decode(&tag_value) {
1115 if bytes.len() == 32 {
1116 let mut key = [0u8; 32];
1117 key.copy_from_slice(&bytes);
1118 data.encryption_key = Some(key);
1119 data.key_tag_name = Some(tag_name.to_string());
1120 }
1121 }
1122
1123 Some(data)
1124 }
1125
    /// Ask a local hashtree daemon to resolve `repo_name` for this pubkey.
    /// Best-effort: any transport, status, or parse failure yields `None`.
    async fn fetch_root_from_local_daemon(
        &self,
        repo_name: &str,
        timeout: Duration,
    ) -> Option<RootEventData> {
        let base = self.local_daemon_url.as_ref()?;
        let url = format!(
            "{}/api/nostr/resolve/{}/{}",
            base.trim_end_matches('/'),
            self.pubkey,
            repo_name
        );

        let client = reqwest::Client::builder().timeout(timeout).build().ok()?;
        let response = client.get(&url).send().await.ok()?;
        if !response.status().is_success() {
            return None;
        }

        let payload: DaemonResolveResponse = response.json().await.ok()?;
        // `source` is diagnostic only; cloned before the payload is consumed.
        let source = payload
            .source
            .clone()
            .unwrap_or_else(|| "unknown".to_string());
        let parsed = Self::parse_daemon_response_to_root_data(payload)?;
        debug!(
            "Resolved repo {} via local daemon source={}",
            repo_name, source
        );
        Some(parsed)
    }
1157
1158 async fn fetch_refs_async_with_timeout(
1159 &self,
1160 repo_name: &str,
1161 timeout_secs: u64,
1162 ) -> Result<(HashMap<String, String>, Option<String>, Option<[u8; 32]>)> {
1163 let client = Client::default();
1165
1166 for relay in &self.relays {
1168 if let Err(e) = client.add_relay(relay).await {
1169 warn!("Failed to add relay {}: {}", relay, e);
1170 }
1171 }
1172
1173 client.connect().await;
1175
1176 let connect_timeout = Duration::from_secs(2);
1177 let query_timeout = Duration::from_secs(timeout_secs.saturating_sub(2).max(3));
1178 let local_daemon_timeout = Duration::from_secs(4);
1179 let retry_delay = Duration::from_millis(300);
1180 let max_attempts = 2;
1181
1182 let start = std::time::Instant::now();
1183
1184 let author = PublicKey::from_hex(&self.pubkey)
1186 .map_err(|e| anyhow::anyhow!("Invalid pubkey: {}", e))?;
1187
1188 let filter = build_repo_event_filter(author, repo_name);
1189
1190 debug!("Querying relays for repo {} events", repo_name);
1191
1192 let mut root_data = None;
1193 for attempt in 1..=max_attempts {
1194 let connect_start = std::time::Instant::now();
1197 let mut last_log = std::time::Instant::now();
1198 let mut has_connected_relay = false;
1199 loop {
1200 let relays = client.relays().await;
1201 let total = relays.len();
1202 let mut connected = 0;
1203 for relay in relays.values() {
1204 if relay.is_connected().await {
1205 connected += 1;
1206 }
1207 }
1208 if connected > 0 {
1209 debug!(
1210 "Connected to {}/{} relay(s) in {:?} (attempt {}/{})",
1211 connected,
1212 total,
1213 start.elapsed(),
1214 attempt,
1215 max_attempts
1216 );
1217 has_connected_relay = true;
1218 break;
1219 }
1220 if last_log.elapsed() > Duration::from_millis(500) {
1221 debug!(
1222 "Connecting to relays... (0/{} after {:?}, attempt {}/{})",
1223 total,
1224 start.elapsed(),
1225 attempt,
1226 max_attempts
1227 );
1228 last_log = std::time::Instant::now();
1229 }
1230 if connect_start.elapsed() > connect_timeout {
1231 debug!(
1232 "Timeout waiting for relay connections - continuing with local-daemon fallback"
1233 );
1234 break;
1235 }
1236 tokio::time::sleep(Duration::from_millis(50)).await;
1237 }
1238
1239 let events = if has_connected_relay {
1243 match client
1244 .get_events_of(
1245 vec![filter.clone()],
1246 EventSource::relays(Some(query_timeout)),
1247 )
1248 .await
1249 {
1250 Ok(events) => events,
1251 Err(e) => {
1252 warn!("Failed to fetch events: {}", e);
1253 vec![]
1254 }
1255 }
1256 } else {
1257 vec![]
1258 };
1259
1260 debug!(
1261 "Got {} events from relays on attempt {}/{}",
1262 events.len(),
1263 attempt,
1264 max_attempts
1265 );
1266 let relay_event = pick_latest_repo_event(events.iter(), repo_name);
1267
1268 if let Some(event) = relay_event {
1269 debug!(
1270 "Found relay event with root hash: {}",
1271 &event.content[..12.min(event.content.len())]
1272 );
1273 root_data = Some(Self::parse_root_event_data_from_event(event));
1274 break;
1275 }
1276
1277 if let Some(data) = self
1278 .fetch_root_from_local_daemon(repo_name, local_daemon_timeout)
1279 .await
1280 {
1281 root_data = Some(data);
1282 break;
1283 }
1284
1285 if attempt < max_attempts {
1286 debug!(
1287 "No hashtree event found for {} on attempt {}/{}; retrying",
1288 repo_name, attempt, max_attempts
1289 );
1290 tokio::time::sleep(retry_delay).await;
1291 }
1292 }
1293
1294 let _ = client.disconnect().await;
1296
1297 let root_data = match root_data {
1298 Some(data) => data,
1299 None => {
1300 anyhow::bail!(
1301 "Repository '{}' not found (no hashtree event published by {})",
1302 repo_name,
1303 Self::format_repo_author(&self.pubkey)
1304 );
1305 }
1306 };
1307
1308 let root_hash = root_data.root_hash;
1309
1310 if root_hash.is_empty() {
1311 debug!("Empty root hash in event");
1312 return Ok((HashMap::new(), None, None));
1313 }
1314
1315 let encryption_key = root_data.encryption_key;
1316 let key_tag_name = root_data.key_tag_name;
1317 let self_encrypted_ciphertext = root_data.self_encrypted_ciphertext;
1318
1319 let unmasked_key = match key_tag_name.as_deref() {
1321 Some("encryptedKey") => {
1322 if let (Some(masked), Some(secret)) = (encryption_key, self.url_secret) {
1324 let mut unmasked = [0u8; 32];
1325 for i in 0..32 {
1326 unmasked[i] = masked[i] ^ secret[i];
1327 }
1328 Some(unmasked)
1329 } else {
1330 anyhow::bail!(
1331 "This repo is link-visible and requires a secret key.\n\
1332 Use: htree://.../{repo_name}#k=<secret>\n\
1333 Ask the repo owner for the full URL with the secret."
1334 );
1335 }
1336 }
1337 Some("selfEncryptedKey") => {
1338 if !self.is_private {
1340 anyhow::bail!(
1341 "This repo is private (author-only).\n\
1342 Use: htree://.../{repo_name}#private\n\
1343 Only the author can access this repo."
1344 );
1345 }
1346
1347 if let Some(keys) = &self.keys {
1349 if let Some(ciphertext) = self_encrypted_ciphertext {
1350 let pubkey = keys.public_key();
1352 match nip44::decrypt(keys.secret_key(), &pubkey, &ciphertext) {
1353 Ok(key_hex) => {
1354 let key_bytes =
1355 hex::decode(&key_hex).context("Invalid decrypted key hex")?;
1356 if key_bytes.len() != 32 {
1357 anyhow::bail!("Decrypted key wrong length");
1358 }
1359 let mut key = [0u8; 32];
1360 key.copy_from_slice(&key_bytes);
1361 Some(key)
1362 }
1363 Err(e) => {
1364 anyhow::bail!(
1365 "Failed to decrypt private repo: {}\n\
1366 The repo may be corrupted or published with a different key.",
1367 e
1368 );
1369 }
1370 }
1371 } else {
1372 anyhow::bail!("selfEncryptedKey tag has invalid format");
1373 }
1374 } else {
1375 anyhow::bail!(
1376 "Cannot access this private repo.\n\
1377 Private repos can only be accessed by their author.\n\
1378 You don't have the secret key for this repo's owner."
1379 );
1380 }
1381 }
1382 Some("key") | None => {
1383 encryption_key
1385 }
1386 Some(other) => {
1387 warn!("Unknown key tag type: {}", other);
1388 encryption_key
1389 }
1390 };
1391
1392 info!(
1393 "Found root hash {} for {} (encrypted: {}, link_visible: {})",
1394 &root_hash[..12.min(root_hash.len())],
1395 repo_name,
1396 unmasked_key.is_some(),
1397 self.url_secret.is_some()
1398 );
1399
1400 let refs = self
1402 .fetch_refs_from_hashtree(&root_hash, unmasked_key.as_ref())
1403 .await?;
1404 Ok((refs, Some(root_hash), unmasked_key))
1405 }
1406
1407 fn decrypt_and_decode(
1409 &self,
1410 data: &[u8],
1411 key: Option<&[u8; 32]>,
1412 ) -> Option<hashtree_core::TreeNode> {
1413 let decrypted_data: Vec<u8>;
1414 let data_to_decode = if let Some(k) = key {
1415 match decrypt_chk(data, k) {
1416 Ok(d) => {
1417 decrypted_data = d;
1418 &decrypted_data
1419 }
1420 Err(e) => {
1421 debug!("Decryption failed: {}", e);
1422 return None;
1423 }
1424 }
1425 } else {
1426 data
1427 };
1428
1429 match decode_tree_node(data_to_decode) {
1430 Ok(node) => Some(node),
1431 Err(e) => {
1432 debug!("Failed to decode tree node: {}", e);
1433 None
1434 }
1435 }
1436 }
1437
1438 async fn fetch_refs_from_hashtree(
1441 &self,
1442 root_hash: &str,
1443 encryption_key: Option<&[u8; 32]>,
1444 ) -> Result<HashMap<String, String>> {
1445 let mut refs = HashMap::new();
1446 debug!(
1447 "fetch_refs_from_hashtree: downloading root {}",
1448 &root_hash[..12]
1449 );
1450
1451 let root_data = match self.blossom.download(root_hash).await {
1453 Ok(data) => {
1454 debug!("Downloaded {} bytes from blossom", data.len());
1455 data
1456 }
1457 Err(e) => {
1458 anyhow::bail!(
1459 "Failed to download root hash {}: {}",
1460 &root_hash[..12.min(root_hash.len())],
1461 e
1462 );
1463 }
1464 };
1465
1466 let root_node = match self.decrypt_and_decode(&root_data, encryption_key) {
1468 Some(node) => {
1469 debug!("Decoded root node with {} links", node.links.len());
1470 node
1471 }
1472 None => {
1473 debug!(
1474 "Failed to decode root node (encryption_key: {})",
1475 encryption_key.is_some()
1476 );
1477 return Ok(refs);
1478 }
1479 };
1480
1481 debug!(
1483 "Root links: {:?}",
1484 root_node
1485 .links
1486 .iter()
1487 .map(|l| l.name.as_deref())
1488 .collect::<Vec<_>>()
1489 );
1490 let git_link = root_node
1491 .links
1492 .iter()
1493 .find(|l| l.name.as_deref() == Some(".git"));
1494 let (git_hash, git_key) = match git_link {
1495 Some(link) => {
1496 debug!("Found .git link with key: {}", link.key.is_some());
1497 (hex::encode(link.hash), link.key)
1498 }
1499 None => {
1500 debug!("No .git directory in hashtree root");
1501 return Ok(refs);
1502 }
1503 };
1504
1505 let git_data = match self.blossom.download(&git_hash).await {
1507 Ok(data) => data,
1508 Err(e) => {
1509 anyhow::bail!(
1510 "Failed to download .git directory ({}): {}",
1511 &git_hash[..12],
1512 e
1513 );
1514 }
1515 };
1516
1517 let git_node = match self.decrypt_and_decode(&git_data, git_key.as_ref()) {
1518 Some(node) => {
1519 debug!(
1520 "Decoded .git node with {} links: {:?}",
1521 node.links.len(),
1522 node.links
1523 .iter()
1524 .map(|l| l.name.as_deref())
1525 .collect::<Vec<_>>()
1526 );
1527 node
1528 }
1529 None => {
1530 debug!("Failed to decode .git node (key: {})", git_key.is_some());
1531 return Ok(refs);
1532 }
1533 };
1534
1535 let refs_link = git_node
1537 .links
1538 .iter()
1539 .find(|l| l.name.as_deref() == Some("refs"));
1540 let (refs_hash, refs_key) = match refs_link {
1541 Some(link) => (hex::encode(link.hash), link.key),
1542 None => {
1543 debug!("No refs directory in .git");
1544 return Ok(refs);
1545 }
1546 };
1547
1548 let refs_data = match self.blossom.try_download(&refs_hash).await {
1550 Some(data) => data,
1551 None => {
1552 debug!("Could not download refs directory");
1553 return Ok(refs);
1554 }
1555 };
1556
1557 let refs_node = match self.decrypt_and_decode(&refs_data, refs_key.as_ref()) {
1558 Some(node) => node,
1559 None => {
1560 return Ok(refs);
1561 }
1562 };
1563
1564 if let Some(head_link) = git_node
1566 .links
1567 .iter()
1568 .find(|l| l.name.as_deref() == Some("HEAD"))
1569 {
1570 let head_hash = hex::encode(head_link.hash);
1571 if let Some(head_data) = self.blossom.try_download(&head_hash).await {
1572 let head_content = if let Some(k) = head_link.key.as_ref() {
1574 match decrypt_chk(&head_data, k) {
1575 Ok(d) => String::from_utf8_lossy(&d).trim().to_string(),
1576 Err(_) => String::from_utf8_lossy(&head_data).trim().to_string(),
1577 }
1578 } else {
1579 String::from_utf8_lossy(&head_data).trim().to_string()
1580 };
1581 refs.insert("HEAD".to_string(), head_content);
1582 }
1583 }
1584
1585 for subdir_link in &refs_node.links {
1587 if subdir_link.link_type != LinkType::Dir {
1588 continue;
1589 }
1590 let subdir_name = match &subdir_link.name {
1591 Some(n) => n.clone(),
1592 None => continue,
1593 };
1594 let subdir_hash = hex::encode(subdir_link.hash);
1595
1596 self.collect_refs_recursive(
1597 &subdir_hash,
1598 subdir_link.key.as_ref(),
1599 &format!("refs/{}", subdir_name),
1600 &mut refs,
1601 )
1602 .await;
1603 }
1604
1605 debug!("Found {} refs from hashtree", refs.len());
1606 Ok(refs)
1607 }
1608
1609 async fn collect_refs_recursive(
1611 &self,
1612 dir_hash: &str,
1613 dir_key: Option<&[u8; 32]>,
1614 prefix: &str,
1615 refs: &mut HashMap<String, String>,
1616 ) {
1617 let dir_data = match self.blossom.try_download(dir_hash).await {
1618 Some(data) => data,
1619 None => return,
1620 };
1621
1622 let dir_node = match self.decrypt_and_decode(&dir_data, dir_key) {
1623 Some(node) => node,
1624 None => return,
1625 };
1626
1627 for link in &dir_node.links {
1628 let name = match &link.name {
1629 Some(n) => n.clone(),
1630 None => continue,
1631 };
1632 let link_hash = hex::encode(link.hash);
1633 let ref_path = format!("{}/{}", prefix, name);
1634
1635 if link.link_type == LinkType::Dir {
1636 Box::pin(self.collect_refs_recursive(
1638 &link_hash,
1639 link.key.as_ref(),
1640 &ref_path,
1641 refs,
1642 ))
1643 .await;
1644 } else {
1645 if let Some(ref_data) = self.blossom.try_download(&link_hash).await {
1647 let sha = if let Some(k) = link.key.as_ref() {
1649 match decrypt_chk(&ref_data, k) {
1650 Ok(d) => String::from_utf8_lossy(&d).trim().to_string(),
1651 Err(_) => String::from_utf8_lossy(&ref_data).trim().to_string(),
1652 }
1653 } else {
1654 String::from_utf8_lossy(&ref_data).trim().to_string()
1655 };
1656 if !sha.is_empty() {
1657 debug!("Found ref {} -> {}", ref_path, sha);
1658 refs.insert(ref_path, sha);
1659 }
1660 }
1661 }
1662 }
1663 }
1664
1665 #[allow(dead_code)]
1667 pub fn update_ref(&mut self, repo_name: &str, ref_name: &str, sha: &str) -> Result<()> {
1668 info!("Updating ref {} -> {} for {}", ref_name, sha, repo_name);
1669
1670 let refs = self.cached_refs.entry(repo_name.to_string()).or_default();
1671 refs.insert(ref_name.to_string(), sha.to_string());
1672
1673 Ok(())
1674 }
1675
1676 pub fn delete_ref(&mut self, repo_name: &str, ref_name: &str) -> Result<()> {
1678 info!("Deleting ref {} for {}", ref_name, repo_name);
1679
1680 if let Some(refs) = self.cached_refs.get_mut(repo_name) {
1681 refs.remove(ref_name);
1682 }
1683
1684 Ok(())
1685 }
1686
    /// Last root hash cached for `repo_name`, if any.
    pub fn get_cached_root_hash(&self, repo_name: &str) -> Option<&String> {
        self.cached_root_hash.get(repo_name)
    }
1691
    /// Last encryption key cached for `repo_name`, if any.
    pub fn get_cached_encryption_key(&self, repo_name: &str) -> Option<&[u8; 32]> {
        self.cached_encryption_key.get(repo_name)
    }
1696
    /// Access the underlying blossom client for direct blob operations.
    pub fn blossom(&self) -> &BlossomClient {
        &self.blossom
    }
1701
1702 pub fn relay_urls(&self) -> Vec<String> {
1704 self.relays.clone()
1705 }
1706
    /// The hex-encoded public key this client resolves repos for.
    #[allow(dead_code)]
    pub fn pubkey(&self) -> &str {
        &self.pubkey
    }
1712
1713 pub fn npub(&self) -> String {
1715 PublicKey::from_hex(&self.pubkey)
1716 .ok()
1717 .and_then(|pk| pk.to_bech32().ok())
1718 .unwrap_or_else(|| self.pubkey.clone())
1719 }
1720
1721 pub fn publish_repo(
1729 &self,
1730 repo_name: &str,
1731 root_hash: &str,
1732 encryption_key: Option<(&[u8; 32], bool, bool)>,
1733 ) -> Result<(String, RelayResult)> {
1734 let keys = self.keys.as_ref().context(format!(
1735 "Cannot push: no secret key for {}. You can only push to your own repos.",
1736 &self.pubkey[..16]
1737 ))?;
1738
1739 info!(
1740 "Publishing repo {} with root hash {} (encrypted: {})",
1741 repo_name,
1742 root_hash,
1743 encryption_key.is_some()
1744 );
1745
1746 let rt = tokio::runtime::Builder::new_multi_thread()
1748 .enable_all()
1749 .build()
1750 .context("Failed to create tokio runtime")?;
1751
1752 let result =
1753 rt.block_on(self.publish_repo_async(keys, repo_name, root_hash, encryption_key));
1754
1755 rt.shutdown_timeout(std::time::Duration::from_millis(500));
1758
1759 result
1760 }
1761
    /// Async body of `publish_repo`: signs and broadcasts the kind-30078
    /// repo event carrying the root hash and key tags.
    ///
    /// Connects to the configured relays (waiting up to 3s for at least
    /// one), picks a `created_at` strictly newer than any existing repo
    /// event so the replaceable event actually replaces, then publishes.
    /// Returns the `htree://` URL plus per-relay success/failure lists;
    /// the relay validation error (if any) is surfaced only after the
    /// client has been disconnected.
    async fn publish_repo_async(
        &self,
        keys: &Keys,
        repo_name: &str,
        root_hash: &str,
        encryption_key: Option<(&[u8; 32], bool, bool)>,
    ) -> Result<(String, RelayResult)> {
        let client = Client::new(keys.clone());

        let configured: Vec<String> = self.relays.clone();
        let mut connected: Vec<String> = Vec::new();
        let mut failed: Vec<String> = Vec::new();

        // Relays that cannot even be added count as failed immediately.
        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
                failed.push(relay.clone());
            }
        }

        client.connect().await;

        // Poll until at least one relay reports connected, or give up
        // after the timeout and attempt the publish anyway.
        let connect_timeout = Duration::from_secs(3);
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut any_connected = false;
            for (_url, relay) in relays.iter() {
                if relay.is_connected().await {
                    any_connected = true;
                    break;
                }
            }
            if any_connected {
                break;
            }
            if start.elapsed() > connect_timeout {
                break;
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        // A replaceable event needs a created_at strictly newer than the
        // latest existing repo event, otherwise relays keep the old one.
        let publish_created_at = next_replaceable_created_at(
            Timestamp::now(),
            latest_repo_event_created_at(
                &client,
                keys.public_key(),
                repo_name,
                Duration::from_secs(2),
            )
            .await,
        );

        let mut tags = vec![
            Tag::custom(TagKind::custom("d"), vec![repo_name.to_string()]),
            Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            Tag::custom(TagKind::custom("hash"), vec![root_hash.to_string()]),
        ];

        // Key tag selection: author-only repos NIP-44-encrypt the key to
        // self ("selfEncryptedKey"); link-visible repos publish it under
        // "encryptedKey"; otherwise the hex key goes out in plain "key".
        if let Some((key, is_link_visible, is_self_private)) = encryption_key {
            if is_self_private {
                let pubkey = keys.public_key();
                let key_hex = hex::encode(key);
                let encrypted =
                    nip44::encrypt(keys.secret_key(), &pubkey, &key_hex, nip44::Version::V2)
                        .map_err(|e| anyhow::anyhow!("NIP-44 encryption failed: {}", e))?;
                tags.push(Tag::custom(
                    TagKind::custom("selfEncryptedKey"),
                    vec![encrypted],
                ));
            } else if is_link_visible {
                tags.push(Tag::custom(
                    TagKind::custom("encryptedKey"),
                    vec![hex::encode(key)],
                ));
            } else {
                tags.push(Tag::custom(TagKind::custom("key"), vec![hex::encode(key)]));
            }
        }

        append_repo_discovery_labels(&mut tags, repo_name);

        let event = EventBuilder::new(Kind::Custom(KIND_APP_DATA), root_hash, tags)
            .custom_created_at(publish_created_at)
            .to_event(keys)
            .map_err(|e| anyhow::anyhow!("Failed to sign event: {}", e))?;

        match client.send_event(event.clone()).await {
            Ok(output) => {
                // Record per-relay outcomes, never double-counting a URL.
                for url in output.success.iter() {
                    let url_str = url.to_string();
                    if !connected.contains(&url_str) {
                        connected.push(url_str);
                    }
                }
                for (url, err) in output.failed.iter() {
                    if err.is_some() {
                        let url_str = url.to_string();
                        if !failed.contains(&url_str) && !connected.contains(&url_str) {
                            failed.push(url_str);
                        }
                    }
                }
                info!(
                    "Sent event {} to {} relays ({} failed)",
                    output.id(),
                    output.success.len(),
                    output.failed.len()
                );
            }
            Err(e) => {
                warn!("Failed to send event: {}", e);
                // Send failed outright: mark every configured relay failed.
                for relay in &self.relays {
                    if !failed.contains(relay) {
                        failed.push(relay.clone());
                    }
                }
            }
        };

        let npub_url = keys
            .public_key()
            .to_bech32()
            .map(|npub| format!("htree://{}/{}", npub, repo_name))
            .unwrap_or_else(|_| format!("htree://{}/{}", &self.pubkey[..16], repo_name));

        // Validate before returning, but disconnect first either way so a
        // validation error does not leak the connection.
        let relay_validation = validate_repo_publish_relays(&configured, &connected);

        let _ = client.disconnect().await;
        tokio::time::sleep(Duration::from_millis(50)).await;

        relay_validation?;

        Ok((
            npub_url,
            RelayResult {
                configured,
                connected,
                failed,
            },
        ))
    }
1924
1925 pub fn fetch_prs(
1927 &self,
1928 repo_name: &str,
1929 state_filter: PullRequestStateFilter,
1930 ) -> Result<Vec<PullRequestListItem>> {
1931 let rt = tokio::runtime::Builder::new_multi_thread()
1932 .enable_all()
1933 .build()
1934 .context("Failed to create tokio runtime")?;
1935
1936 let result = rt.block_on(self.fetch_prs_async(repo_name, state_filter));
1937 rt.shutdown_timeout(Duration::from_millis(500));
1938 result
1939 }
1940
    /// Fetch pull-request events (kind 1618) for `repo_name` and resolve
    /// each PR's current state from its latest trusted status event.
    ///
    /// Queries relays via the SDK first; when a query yields nothing, a
    /// raw websocket relay query is tried as a fallback. Results are
    /// filtered by `state_filter` and sorted newest-first (event id as a
    /// deterministic tie-breaker).
    ///
    /// Errors when no relay connects within 2s, or a query fails or
    /// times out (3s per query).
    pub async fn fetch_prs_async(
        &self,
        repo_name: &str,
        state_filter: PullRequestStateFilter,
    ) -> Result<Vec<PullRequestListItem>> {
        let client = Client::default();

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
            }
        }
        client.connect().await;

        // Wait (up to 2s) for at least one relay connection; bail otherwise.
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut connected = false;
            for relay in relays.values() {
                if relay.is_connected().await {
                    connected = true;
                    break;
                }
            }
            if connected {
                break;
            }
            if start.elapsed() > Duration::from_secs(2) {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to connect to any relay while fetching PRs"
                ));
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        // PRs reference the repo announcement through an "a" tag of the
        // form "kind:pubkey:repo_name".
        let repo_address = format!("{}:{}:{}", KIND_REPO_ANNOUNCEMENT, self.pubkey, repo_name);
        let pull_request_filter = Filter::new()
            .kind(Kind::Custom(KIND_PULL_REQUEST))
            .custom_tag(SingleLetterTag::lowercase(Alphabet::A), vec![&repo_address]);

        let mut pr_events = match tokio::time::timeout(
            Duration::from_secs(3),
            client.get_events_of(vec![pull_request_filter.clone()], EventSource::relays(None)),
        )
        .await
        {
            Ok(Ok(events)) => events,
            Ok(Err(e)) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to fetch PR events from relays: {}",
                    e
                ));
            }
            Err(_) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!("Timed out fetching PR events from relays"));
            }
        };

        // Empty SDK result: retry once via a raw relay websocket query.
        if pr_events.is_empty() {
            let fallback_events = fetch_events_via_raw_relay_query(
                &self.relays,
                pull_request_filter,
                Duration::from_secs(3),
            )
            .await;
            if !fallback_events.is_empty() {
                debug!(
                    "Raw relay fallback recovered {} PR event(s) for {}",
                    fallback_events.len(),
                    repo_name
                );
                pr_events = fallback_events;
            }
        }

        if pr_events.is_empty() {
            let _ = client.disconnect().await;
            return Ok(Vec::new());
        }

        // Second query: status events (open/applied/closed/draft) that
        // reference any of the collected PR ids via an "e" tag.
        let pr_ids: Vec<String> = pr_events.iter().map(|e| e.id.to_hex()).collect();

        let status_event_filter = Filter::new()
            .kinds(vec![
                Kind::Custom(KIND_STATUS_OPEN),
                Kind::Custom(KIND_STATUS_APPLIED),
                Kind::Custom(KIND_STATUS_CLOSED),
                Kind::Custom(KIND_STATUS_DRAFT),
            ])
            .custom_tag(
                SingleLetterTag::lowercase(Alphabet::E),
                pr_ids.iter().map(|s| s.as_str()).collect::<Vec<_>>(),
            );

        let mut status_events = match tokio::time::timeout(
            Duration::from_secs(3),
            client.get_events_of(vec![status_event_filter.clone()], EventSource::relays(None)),
        )
        .await
        {
            Ok(Ok(events)) => events,
            Ok(Err(e)) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Failed to fetch PR status events from relays: {}",
                    e
                ));
            }
            Err(_) => {
                let _ = client.disconnect().await;
                return Err(anyhow::anyhow!(
                    "Timed out fetching PR status events from relays"
                ));
            }
        };

        // Same raw-websocket fallback for the status query.
        if status_events.is_empty() {
            let fallback_events = fetch_events_via_raw_relay_query(
                &self.relays,
                status_event_filter,
                Duration::from_secs(3),
            )
            .await;
            if !fallback_events.is_empty() {
                debug!(
                    "Raw relay fallback recovered {} PR status event(s) for {}",
                    fallback_events.len(),
                    repo_name
                );
                status_events = fallback_events;
            }
        }

        let _ = client.disconnect().await;

        // Map PR id -> latest status kind (which statuses are trusted is
        // decided inside latest_trusted_pr_status_kinds).
        let latest_status =
            latest_trusted_pr_status_kinds(&pr_events, &status_events, &self.pubkey);

        let mut prs = Vec::new();
        for event in &pr_events {
            let pr_id = event.id.to_hex();
            let state =
                PullRequestState::from_latest_status_kind(latest_status.get(&pr_id).copied());
            if !state_filter.includes(state) {
                continue;
            }

            // Pull the descriptive fields out of the PR event's tags.
            let mut subject = None;
            let mut commit_tip = None;
            let mut branch = None;
            let mut target_branch = None;

            for tag in event.tags.iter() {
                let slice = tag.as_slice();
                if slice.len() >= 2 {
                    match slice[0].as_str() {
                        "subject" => subject = Some(slice[1].to_string()),
                        "c" => commit_tip = Some(slice[1].to_string()),
                        "branch" => branch = Some(slice[1].to_string()),
                        "target-branch" => target_branch = Some(slice[1].to_string()),
                        _ => {}
                    }
                }
            }

            prs.push(PullRequestListItem {
                event_id: pr_id,
                author_pubkey: event.pubkey.to_hex(),
                state,
                subject,
                commit_tip,
                branch,
                target_branch,
                created_at: event.created_at.as_u64(),
            });
        }

        // Newest first; event id breaks created_at ties deterministically.
        prs.sort_by(|left, right| {
            right
                .created_at
                .cmp(&left.created_at)
                .then_with(|| right.event_id.cmp(&left.event_id))
        });

        debug!(
            "Found {} PRs for {} (filter: {:?})",
            prs.len(),
            repo_name,
            state_filter
        );
        Ok(prs)
    }
2142
2143 pub fn publish_pr_merged_status(
2145 &self,
2146 pr_event_id: &str,
2147 pr_author_pubkey: &str,
2148 ) -> Result<()> {
2149 let keys = self
2150 .keys
2151 .as_ref()
2152 .context("Cannot publish status: no secret key")?;
2153
2154 let rt = tokio::runtime::Builder::new_multi_thread()
2155 .enable_all()
2156 .build()
2157 .context("Failed to create tokio runtime")?;
2158
2159 let result =
2160 rt.block_on(self.publish_pr_merged_status_async(keys, pr_event_id, pr_author_pubkey));
2161 rt.shutdown_timeout(Duration::from_millis(500));
2162 result
2163 }
2164
    /// Async body of `publish_pr_merged_status`: signs and broadcasts a
    /// kind-1631 status event tagging the PR event ("e") and its author
    /// ("p").
    ///
    /// Errors if no relay connects within 3s or no relay confirms the
    /// publish. Always disconnects before returning the result.
    async fn publish_pr_merged_status_async(
        &self,
        keys: &Keys,
        pr_event_id: &str,
        pr_author_pubkey: &str,
    ) -> Result<()> {
        let client = Client::new(keys.clone());

        for relay in &self.relays {
            if let Err(e) = client.add_relay(relay).await {
                warn!("Failed to add relay {}: {}", relay, e);
            }
        }
        client.connect().await;

        // Wait (up to 3s) for at least one connected relay.
        let start = std::time::Instant::now();
        loop {
            let relays = client.relays().await;
            let mut connected = false;
            for relay in relays.values() {
                if relay.is_connected().await {
                    connected = true;
                    break;
                }
            }
            if connected {
                break;
            }
            if start.elapsed() > Duration::from_secs(3) {
                anyhow::bail!("Failed to connect to any relay for status publish");
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }

        let tags = vec![
            Tag::custom(TagKind::custom("e"), vec![pr_event_id.to_string()]),
            Tag::custom(TagKind::custom("p"), vec![pr_author_pubkey.to_string()]),
        ];

        let event = EventBuilder::new(Kind::Custom(KIND_STATUS_APPLIED), "", tags)
            .to_event(keys)
            .map_err(|e| anyhow::anyhow!("Failed to sign status event: {}", e))?;

        // Require at least one relay acceptance before reporting success.
        let publish_result = match client.send_event(event).await {
            Ok(output) => {
                if output.success.is_empty() {
                    Err(anyhow::anyhow!(
                        "PR merged status was not confirmed by any relay"
                    ))
                } else {
                    info!(
                        "Published PR merged status to {} relays",
                        output.success.len()
                    );
                    Ok(())
                }
            }
            Err(e) => Err(anyhow::anyhow!("Failed to publish PR merged status: {}", e)),
        };

        let _ = client.disconnect().await;
        // Brief grace period after disconnect before the runtime is dropped.
        tokio::time::sleep(Duration::from_millis(50)).await;
        publish_result
    }
2230
2231 #[allow(dead_code)]
2233 pub async fn upload_blob(&self, _hash: &str, data: &[u8]) -> Result<String> {
2234 let hash = self
2235 .blossom
2236 .upload(data)
2237 .await
2238 .map_err(|e| anyhow::anyhow!("Blossom upload failed: {}", e))?;
2239 Ok(hash)
2240 }
2241
2242 #[allow(dead_code)]
2244 pub async fn upload_blob_if_missing(&self, data: &[u8]) -> Result<(String, bool)> {
2245 self.blossom
2246 .upload_if_missing(data)
2247 .await
2248 .map_err(|e| anyhow::anyhow!("Blossom upload failed: {}", e))
2249 }
2250
2251 #[allow(dead_code)]
2253 pub async fn download_blob(&self, hash: &str) -> Result<Vec<u8>> {
2254 self.blossom
2255 .download(hash)
2256 .await
2257 .map_err(|e| anyhow::anyhow!("Blossom download failed: {}", e))
2258 }
2259
    /// Best-effort download: `None` on any failure instead of an error.
    #[allow(dead_code)]
    pub async fn try_download_blob(&self, hash: &str) -> Option<Vec<u8>> {
        self.blossom.try_download(hash).await
    }
2265}
2266
2267#[cfg(test)]
2268mod tests {
2269 use super::*;
2270
    // Valid hex-encoded public key used as the repo owner in these tests.
    const TEST_PUBKEY: &str = "4523be58d395b1b196a9b8c82b038b6895cb02b683d0c253a955068dba1facd0";
2272
    // Baseline configuration shared by the client-construction tests.
    fn test_config() -> Config {
        Config::default()
    }
2276
2277 #[test]
2278 fn test_new_client() {
2279 let config = test_config();
2280 let client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
2281 assert!(!client.relays.is_empty());
2282 assert!(!client.can_sign());
2283 }
2284
2285 #[test]
2286 fn test_new_client_with_secret() {
2287 let config = test_config();
2288 let secret = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
2289 let client =
2290 NostrClient::new(TEST_PUBKEY, Some(secret.to_string()), None, false, &config).unwrap();
2291 assert!(client.can_sign());
2292 }
2293
    // The first configured blossom read server is adopted as the
    // client's local-daemon fallback URL.
    #[test]
    fn test_new_client_uses_local_read_server_as_daemon_fallback() {
        let mut config = test_config();
        config.server.bind_address = "127.0.0.1:1".to_string();
        config.blossom.read_servers = vec!["http://127.0.0.1:19092".to_string()];

        let client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        assert_eq!(
            client.local_daemon_url.as_deref(),
            Some("http://127.0.0.1:19092")
        );
    }
2306
2307 #[test]
2308 fn test_fetch_refs_empty() {
2309 let config = test_config();
2310 let client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
2311 let refs = client.cached_refs.get("new-repo");
2313 assert!(refs.is_none());
2314 }
2315
2316 #[test]
2317 fn test_validate_repo_publish_relays_allows_local_only_when_only_local_relays_configured() {
2318 let configured = vec!["ws://127.0.0.1:8080/ws".to_string()];
2319 let connected = vec!["ws://127.0.0.1:8080/ws".to_string()];
2320
2321 assert!(validate_repo_publish_relays(&configured, &connected).is_ok());
2322 }
2323
    // When public relays were configured but only local relays confirmed,
    // the publish must be rejected and the error must say why.
    #[test]
    fn test_validate_repo_publish_relays_rejects_local_only_when_public_relays_configured() {
        let configured = vec![
            "ws://127.0.0.1:8080/ws".to_string(),
            "wss://relay.damus.io".to_string(),
        ];
        let connected = vec!["ws://127.0.0.1:8080/ws".to_string()];

        let err = validate_repo_publish_relays(&configured, &connected)
            .expect_err("should reject local-only publication");
        assert!(err.to_string().contains("No public relay confirmed"));
        assert!(err.to_string().contains("local relays only"));
    }
2337
2338 #[test]
2339 fn test_update_ref() {
2340 let config = test_config();
2341 let mut client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
2342
2343 client
2344 .update_ref("repo", "refs/heads/main", "abc123")
2345 .unwrap();
2346
2347 let refs = client.cached_refs.get("repo").unwrap();
2348 assert_eq!(refs.get("refs/heads/main"), Some(&"abc123".to_string()));
2349 }
2350
    // pick_latest_event selects the event with the larger created_at.
    #[test]
    fn test_pick_latest_event_prefers_newer_timestamp() {
        let keys = Keys::generate();
        let older = Timestamp::from_secs(1_700_000_000);
        let newer = Timestamp::from_secs(1_700_000_001);

        let event_old = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "old", [])
            .custom_created_at(older)
            .to_event(&keys)
            .unwrap();
        let event_new = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "new", [])
            .custom_created_at(newer)
            .to_event(&keys)
            .unwrap();

        let picked = pick_latest_event([&event_old, &event_new]).unwrap();
        assert_eq!(picked.id, event_new.id);
    }
2369
    // On identical created_at values, the event with the larger id is
    // picked — a deterministic tie-break.
    #[test]
    fn test_pick_latest_event_breaks_ties_with_event_id() {
        let keys = Keys::generate();
        let created_at = Timestamp::from_secs(1_700_000_000);

        let event_a = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "a", [])
            .custom_created_at(created_at)
            .to_event(&keys)
            .unwrap();
        let event_b = EventBuilder::new(Kind::Custom(KIND_APP_DATA), "b", [])
            .custom_created_at(created_at)
            .to_event(&keys)
            .unwrap();

        let expected_id = if event_a.id > event_b.id {
            event_a.id
        } else {
            event_b.id
        };
        let picked = pick_latest_event([&event_a, &event_b]).unwrap();
        assert_eq!(picked.id, expected_id);
    }
2392
2393 #[test]
2394 fn test_next_replaceable_created_at_uses_now_when_existing_is_older() {
2395 let now = Timestamp::from_secs(1_700_000_010);
2396 let existing = Timestamp::from_secs(1_700_000_009);
2397
2398 assert_eq!(
2399 next_replaceable_created_at(now, Some(existing)),
2400 now,
2401 "older repo events should not delay a new publish"
2402 );
2403 }
2404
2405 #[test]
2406 fn test_next_replaceable_created_at_bumps_same_second_events() {
2407 let now = Timestamp::from_secs(1_700_000_010);
2408 let existing = Timestamp::from_secs(1_700_000_010);
2409
2410 assert_eq!(
2411 next_replaceable_created_at(now, Some(existing)),
2412 Timestamp::from_secs(1_700_000_011),
2413 "same-second repo publishes need a strictly newer timestamp"
2414 );
2415 }
2416
    // Repo selection matches the "d" tag exactly: a newer event for a
    // different repo whose name merely shares a prefix must not shadow
    // the requested repo.
    #[test]
    fn test_pick_latest_repo_event_ignores_newer_different_d_tag() {
        let keys = Keys::generate();
        let older = Timestamp::from_secs(1_700_000_000);
        let newer = Timestamp::from_secs(1_700_000_031);

        let iris_chat = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "good",
            [
                Tag::custom(TagKind::custom("d"), vec!["iris-chat".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            ],
        )
        .custom_created_at(older)
        .to_event(&keys)
        .unwrap();

        let iris_chat_flutter = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "bad",
            [
                Tag::custom(TagKind::custom("d"), vec!["iris-chat-flutter".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            ],
        )
        .custom_created_at(newer)
        .to_event(&keys)
        .unwrap();

        let picked = pick_latest_repo_event([&iris_chat, &iris_chat_flutter], "iris-chat").unwrap();
        assert_eq!(picked.id, iris_chat.id);
    }
2450
    // Discovery labels ("l" tags) must include the generic git label and
    // the path prefix ("tools" for "tools/hashtree").
    #[test]
    fn test_append_repo_discovery_labels_includes_git_label_and_prefixes() {
        let mut tags = vec![];
        append_repo_discovery_labels(&mut tags, "tools/hashtree");

        // Collect only the values of "l" tags.
        let values: Vec<String> = tags
            .iter()
            .filter_map(|tag| {
                let parts = tag.as_slice();
                if parts.first().map(|kind| kind.as_str()) != Some("l") {
                    return None;
                }
                parts.get(1).cloned()
            })
            .collect();

        assert!(values.iter().any(|value| value == LABEL_GIT));
        assert!(values.iter().any(|value| value == "tools"));
    }
2470
    // Listing must drop events without the git label, keep only the
    // newest event per "d" tag, and return repos in a stable order
    // ("alpha" before "zeta/tools").
    #[test]
    fn test_list_git_repo_announcements_filters_dedupes_and_sorts() {
        let keys = Keys::generate();
        let alpha_old = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "old",
            [
                Tag::custom(TagKind::custom("d"), vec!["alpha".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_GIT.to_string()]),
            ],
        )
        .custom_created_at(Timestamp::from_secs(10))
        .to_event(&keys)
        .unwrap();
        let alpha_new = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "new",
            [
                Tag::custom(TagKind::custom("d"), vec!["alpha".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_GIT.to_string()]),
            ],
        )
        .custom_created_at(Timestamp::from_secs(20))
        .to_event(&keys)
        .unwrap();
        let zeta = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "zeta",
            [
                Tag::custom(TagKind::custom("d"), vec!["zeta/tools".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_GIT.to_string()]),
            ],
        )
        .custom_created_at(Timestamp::from_secs(15))
        .to_event(&keys)
        .unwrap();
        // Newest event overall, but lacks the git label: must be ignored.
        let ignored = EventBuilder::new(
            Kind::Custom(KIND_APP_DATA),
            "ignored",
            [
                Tag::custom(TagKind::custom("d"), vec!["not-git".to_string()]),
                Tag::custom(TagKind::custom("l"), vec![LABEL_HASHTREE.to_string()]),
            ],
        )
        .custom_created_at(Timestamp::from_secs(30))
        .to_event(&keys)
        .unwrap();

        let repos = list_git_repo_announcements(&[alpha_old, zeta, ignored, alpha_new]);
        let names: Vec<&str> = repos.iter().map(|repo| repo.repo_name.as_str()).collect();

        assert_eq!(names, vec!["alpha", "zeta/tools"]);
        assert_eq!(repos[0].created_at, Timestamp::from_secs(20));
    }
2528
2529 #[test]
2530 fn test_parse_daemon_response_to_root_data_encrypted_key() {
2531 let payload = DaemonResolveResponse {
2532 hash: Some("ab".repeat(32)),
2533 key: None,
2534 encrypted_key: Some("11".repeat(32)),
2535 self_encrypted_key: None,
2536 source: Some("webrtc".to_string()),
2537 };
2538
2539 let parsed = NostrClient::parse_daemon_response_to_root_data(payload).unwrap();
2540 assert_eq!(parsed.root_hash, "ab".repeat(32));
2541 assert_eq!(parsed.key_tag_name.as_deref(), Some("encryptedKey"));
2542 assert!(parsed.self_encrypted_ciphertext.is_none());
2543 assert_eq!(parsed.encryption_key.unwrap(), [0x11; 32]);
2544 }
2545
2546 #[test]
2547 fn test_parse_daemon_response_to_root_data_self_encrypted() {
2548 let payload = DaemonResolveResponse {
2549 hash: Some("cd".repeat(32)),
2550 key: None,
2551 encrypted_key: None,
2552 self_encrypted_key: Some("ciphertext".to_string()),
2553 source: Some("webrtc".to_string()),
2554 };
2555
2556 let parsed = NostrClient::parse_daemon_response_to_root_data(payload).unwrap();
2557 assert_eq!(parsed.root_hash, "cd".repeat(32));
2558 assert_eq!(parsed.key_tag_name.as_deref(), Some("selfEncryptedKey"));
2559 assert_eq!(
2560 parsed.self_encrypted_ciphertext.as_deref(),
2561 Some("ciphertext")
2562 );
2563 assert!(parsed.encryption_key.is_none());
2564 }
2565
    /// End-to-end check of `fetch_root_from_local_daemon`: spin up a local
    /// axum server that mimics the daemon's resolve endpoint and verify the
    /// JSON response is parsed into root hash, key tag name, and key bytes.
    #[tokio::test]
    async fn test_fetch_root_from_local_daemon_parses_response() {
        use axum::{extract::Path, routing::get, Json, Router};
        use serde_json::json;

        // Bind to port 0 so the OS assigns a free port; the real address is
        // read back and injected into the client below.
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        let app = Router::new().route(
            "/api/nostr/resolve/:pubkey/:treename",
            get(
                |Path((pubkey, treename)): Path<(String, String)>| async move {
                    // Canned daemon payload. Note "key" echoes the path here,
                    // while "key_tag" carries the hex key material — the
                    // assertions below check how the client interprets both.
                    Json(json!({
                        "key": format!("{}/{}", pubkey, treename),
                        "hash": "ab".repeat(32),
                        "source": "webrtc",
                        "key_tag": "22".repeat(32),
                    }))
                },
            ),
        );

        // Run the server on a background task; errors are ignored because the
        // task is aborted at the end of the test.
        let server = tokio::spawn(async move {
            let _ = axum::serve(listener, app).await;
        });

        let config = test_config();
        let mut client = NostrClient::new(TEST_PUBKEY, None, None, false, &config).unwrap();
        // Point the client at the just-started fake daemon.
        client.local_daemon_url = Some(format!("http://{}", addr));

        let resolved = client
            .fetch_root_from_local_daemon("repo", Duration::from_secs(2))
            .await
            .unwrap();
        assert_eq!(resolved.root_hash, "ab".repeat(32));
        // The presence of a "key" field selects the plain "key" tag name.
        assert_eq!(resolved.key_tag_name.as_deref(), Some("key"));
        // The hex "key_tag" value is decoded into the 32-byte encryption key.
        assert_eq!(resolved.encryption_key, Some([0x22; 32]));

        server.abort();
    }
2605
2606 #[test]
2607 fn test_stored_key_from_hex() {
2608 let secret = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
2609 let key = StoredKey::from_secret_hex(secret, Some("test".to_string())).unwrap();
2610 assert_eq!(key.secret_hex.as_deref(), Some(secret));
2611 assert_eq!(key.petname, Some("test".to_string()));
2612 assert_eq!(key.pubkey_hex.len(), 64);
2613 }
2614
2615 #[test]
2616 fn test_stored_key_from_nsec() {
2617 let nsec = "nsec1vl029mgpspedva04g90vltkh6fvh240zqtv9k0t9af8935ke9laqsnlfe5";
2619 let key = StoredKey::from_nsec(nsec, None).unwrap();
2620 assert_eq!(key.secret_hex.as_deref().map(str::len), Some(64));
2621 assert_eq!(key.pubkey_hex.len(), 64);
2622 }
2623
2624 #[test]
2625 fn test_stored_key_from_npub_is_read_only() {
2626 let npub = "npub1xdhnr9mrv47kkrn95k6cwecearydeh8e895990n3acntwvmgk2dsdeeycm";
2627 let key = StoredKey::from_npub(npub, Some("sirius".to_string())).unwrap();
2628
2629 assert!(key.secret_hex.is_none());
2630 assert_eq!(key.petname.as_deref(), Some("sirius"));
2631 assert_eq!(key.pubkey_hex.len(), 64);
2632 }
2633
2634 #[test]
2635 fn test_resolve_self_identity_ignores_read_only_aliases() {
2636 let read_only = StoredKey::from_npub(
2637 "npub1xdhnr9mrv47kkrn95k6cwecearydeh8e895990n3acntwvmgk2dsdeeycm",
2638 Some("self".to_string()),
2639 )
2640 .unwrap();
2641 let signing = StoredKey::from_nsec(
2642 "nsec1vl029mgpspedva04g90vltkh6fvh240zqtv9k0t9af8935ke9laqsnlfe5",
2643 Some("work".to_string()),
2644 )
2645 .unwrap();
2646
2647 let resolved = resolve_self_identity(&[read_only, signing.clone()]).unwrap();
2648
2649 assert_eq!(resolved.0, signing.pubkey_hex);
2650 assert_eq!(resolved.1, signing.secret_hex);
2651 }
2652
2653 #[test]
2654 fn test_resolve_identity_hex_pubkey() {
2655 let result = resolve_identity(TEST_PUBKEY);
2657 assert!(result.is_ok());
2658 let (pubkey, secret) = result.unwrap();
2659 assert_eq!(pubkey, TEST_PUBKEY);
2660 assert!(secret.is_none());
2662 }
2663
2664 #[test]
2665 fn test_resolve_identity_npub() {
2666 let pk_bytes = hex::decode(TEST_PUBKEY).unwrap();
2668 let pk = PublicKey::from_slice(&pk_bytes).unwrap();
2669 let npub = pk.to_bech32().unwrap();
2670
2671 let result = resolve_identity(&npub);
2672 assert!(result.is_ok(), "Failed: {:?}", result.err());
2673 let (pubkey, _) = result.unwrap();
2674 assert_eq!(pubkey.len(), 64);
2676 assert_eq!(pubkey, TEST_PUBKEY);
2677 }
2678
2679 #[test]
2680 fn test_format_repo_author_uses_full_npub() {
2681 let formatted = NostrClient::format_repo_author(TEST_PUBKEY);
2682 let expected = PublicKey::from_hex(TEST_PUBKEY)
2683 .unwrap()
2684 .to_bech32()
2685 .unwrap();
2686
2687 assert_eq!(formatted, expected);
2688 assert!(!formatted.contains("..."));
2689 }
2690
2691 #[test]
2692 fn test_resolve_identity_unknown_petname() {
2693 let result = resolve_identity("nonexistent_petname_xyz");
2694 assert!(result.is_err());
2695 }
2696
2697 #[test]
2699 fn test_private_key_is_nip44_encrypted_not_plaintext() {
2700 use nostr_sdk::prelude::{nip44, Keys};
2701
2702 let keys = Keys::generate();
2704 let pubkey = keys.public_key();
2705
2706 let chk_key: [u8; 32] = [
2708 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab,
2709 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67,
2710 0x89, 0xab, 0xcd, 0xef,
2711 ];
2712 let plaintext_hex = hex::encode(&chk_key);
2713
2714 let encrypted = nip44::encrypt(
2716 keys.secret_key(),
2717 &pubkey,
2718 &plaintext_hex,
2719 nip44::Version::V2,
2720 )
2721 .expect("NIP-44 encryption should succeed");
2722
2723 assert_ne!(
2725 encrypted, plaintext_hex,
2726 "NIP-44 encrypted value must differ from plaintext CHK hex"
2727 );
2728
2729 assert!(
2731 !encrypted.contains(&plaintext_hex),
2732 "Encrypted value should not contain plaintext hex"
2733 );
2734
2735 let decrypted = nip44::decrypt(keys.secret_key(), &pubkey, &encrypted)
2737 .expect("NIP-44 decryption should succeed");
2738
2739 assert_eq!(
2740 decrypted, plaintext_hex,
2741 "Decrypted value should match original plaintext hex"
2742 );
2743 }
2744
2745 #[test]
2747 fn test_encryption_modes_produce_different_values() {
2748 use nostr_sdk::prelude::{nip44, Keys};
2749
2750 let keys = Keys::generate();
2751 let pubkey = keys.public_key();
2752
2753 let chk_key: [u8; 32] = [0xaa; 32];
2755 let plaintext_hex = hex::encode(&chk_key);
2756
2757 let public_value = plaintext_hex.clone();
2759
2760 let private_value = nip44::encrypt(
2764 keys.secret_key(),
2765 &pubkey,
2766 &plaintext_hex,
2767 nip44::Version::V2,
2768 )
2769 .expect("NIP-44 encryption should succeed");
2770
2771 assert_ne!(
2773 private_value, public_value,
2774 "Private (NIP-44) value must differ from public (plaintext) value"
2775 );
2776
2777 assert!(
2779 private_value.len() != 64,
2780 "NIP-44 output should not be 64 chars like hex CHK"
2781 );
2782 }
2783
2784 fn build_test_pr_event(keys: &Keys, created_at_secs: u64) -> Event {
2785 EventBuilder::new(
2786 Kind::Custom(KIND_PULL_REQUEST),
2787 "",
2788 [Tag::custom(
2789 TagKind::custom("subject"),
2790 vec!["test pr".to_string()],
2791 )],
2792 )
2793 .custom_created_at(Timestamp::from_secs(created_at_secs))
2794 .to_event(keys)
2795 .unwrap()
2796 }
2797
2798 fn build_test_status_event(
2799 keys: &Keys,
2800 kind: u16,
2801 pr_event_id: &str,
2802 created_at_secs: u64,
2803 ) -> Event {
2804 EventBuilder::new(
2805 Kind::Custom(kind),
2806 "",
2807 [Tag::custom(
2808 TagKind::custom("e"),
2809 vec![pr_event_id.to_string()],
2810 )],
2811 )
2812 .custom_created_at(Timestamp::from_secs(created_at_secs))
2813 .to_event(keys)
2814 .unwrap()
2815 }
2816
2817 #[test]
2818 fn test_pull_request_state_from_latest_status_kind_defaults_to_open() {
2819 assert_eq!(
2820 PullRequestState::from_latest_status_kind(None),
2821 PullRequestState::Open
2822 );
2823 assert_eq!(
2824 PullRequestState::from_latest_status_kind(Some(KIND_STATUS_OPEN)),
2825 PullRequestState::Open
2826 );
2827 assert_eq!(
2828 PullRequestState::from_latest_status_kind(Some(9999)),
2829 PullRequestState::Open
2830 );
2831 }
2832
2833 #[test]
2834 fn test_pull_request_state_from_status_kind_maps_known_kinds() {
2835 assert_eq!(
2836 PullRequestState::from_status_kind(KIND_STATUS_APPLIED),
2837 Some(PullRequestState::Applied)
2838 );
2839 assert_eq!(
2840 PullRequestState::from_status_kind(KIND_STATUS_CLOSED),
2841 Some(PullRequestState::Closed)
2842 );
2843 assert_eq!(
2844 PullRequestState::from_status_kind(KIND_STATUS_DRAFT),
2845 Some(PullRequestState::Draft)
2846 );
2847 assert_eq!(PullRequestState::from_status_kind(9999), None);
2848 }
2849
2850 #[test]
2851 fn test_pull_request_state_filter_includes_only_requested_state() {
2852 assert!(PullRequestStateFilter::Open.includes(PullRequestState::Open));
2853 assert!(!PullRequestStateFilter::Open.includes(PullRequestState::Closed));
2854 assert!(PullRequestStateFilter::All.includes(PullRequestState::Open));
2855 assert!(PullRequestStateFilter::All.includes(PullRequestState::Applied));
2856 assert!(PullRequestStateFilter::All.includes(PullRequestState::Closed));
2857 assert!(PullRequestStateFilter::All.includes(PullRequestState::Draft));
2858 }
2859
2860 #[test]
2861 fn test_pull_request_state_strings_are_stable() {
2862 assert_eq!(PullRequestState::Open.as_str(), "open");
2863 assert_eq!(PullRequestState::Applied.as_str(), "applied");
2864 assert_eq!(PullRequestState::Closed.as_str(), "closed");
2865 assert_eq!(PullRequestState::Draft.as_str(), "draft");
2866
2867 assert_eq!(PullRequestStateFilter::Open.as_str(), "open");
2868 assert_eq!(PullRequestStateFilter::Applied.as_str(), "applied");
2869 assert_eq!(PullRequestStateFilter::Closed.as_str(), "closed");
2870 assert_eq!(PullRequestStateFilter::Draft.as_str(), "draft");
2871 assert_eq!(PullRequestStateFilter::All.as_str(), "all");
2872 }
2873
2874 #[test]
2875 fn test_latest_trusted_pr_status_kinds_ignores_untrusted_signers() {
2876 let repo_owner = Keys::generate();
2877 let pr_author = Keys::generate();
2878 let attacker = Keys::generate();
2879
2880 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2881 let spoofed_status = build_test_status_event(
2882 &attacker,
2883 KIND_STATUS_CLOSED,
2884 &pr_event.id.to_hex(),
2885 1_700_100_010,
2886 );
2887
2888 let statuses = latest_trusted_pr_status_kinds(
2889 &[pr_event.clone()],
2890 &[spoofed_status],
2891 &repo_owner.public_key().to_hex(),
2892 );
2893
2894 assert!(
2895 !statuses.contains_key(&pr_event.id.to_hex()),
2896 "untrusted status signer should be ignored"
2897 );
2898 }
2899
2900 #[test]
2901 fn test_latest_trusted_pr_status_kinds_accepts_pr_author() {
2902 let repo_owner = Keys::generate();
2903 let pr_author = Keys::generate();
2904
2905 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2906 let author_status = build_test_status_event(
2907 &pr_author,
2908 KIND_STATUS_CLOSED,
2909 &pr_event.id.to_hex(),
2910 1_700_100_010,
2911 );
2912
2913 let statuses = latest_trusted_pr_status_kinds(
2914 &[pr_event.clone()],
2915 &[author_status],
2916 &repo_owner.public_key().to_hex(),
2917 );
2918
2919 assert_eq!(
2920 statuses.get(&pr_event.id.to_hex()).copied(),
2921 Some(KIND_STATUS_CLOSED)
2922 );
2923 }
2924
2925 #[test]
2926 fn test_latest_trusted_pr_status_kinds_rejects_applied_from_pr_author() {
2927 let repo_owner = Keys::generate();
2928 let pr_author = Keys::generate();
2929
2930 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2931 let author_applied = build_test_status_event(
2932 &pr_author,
2933 KIND_STATUS_APPLIED,
2934 &pr_event.id.to_hex(),
2935 1_700_100_010,
2936 );
2937
2938 let statuses = latest_trusted_pr_status_kinds(
2939 &[pr_event.clone()],
2940 &[author_applied],
2941 &repo_owner.public_key().to_hex(),
2942 );
2943
2944 assert!(
2945 !statuses.contains_key(&pr_event.id.to_hex()),
2946 "PR author must not be able to self-mark applied"
2947 );
2948 }
2949
2950 #[test]
2951 fn test_latest_trusted_pr_status_kinds_accepts_repo_owner() {
2952 let repo_owner = Keys::generate();
2953 let pr_author = Keys::generate();
2954
2955 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2956 let owner_status = build_test_status_event(
2957 &repo_owner,
2958 KIND_STATUS_APPLIED,
2959 &pr_event.id.to_hex(),
2960 1_700_100_010,
2961 );
2962
2963 let statuses = latest_trusted_pr_status_kinds(
2964 &[pr_event.clone()],
2965 &[owner_status],
2966 &repo_owner.public_key().to_hex(),
2967 );
2968
2969 assert_eq!(
2970 statuses.get(&pr_event.id.to_hex()).copied(),
2971 Some(KIND_STATUS_APPLIED)
2972 );
2973 }
2974
2975 #[test]
2976 fn test_latest_trusted_pr_status_kinds_preserves_owner_applied_over_newer_author_status() {
2977 let repo_owner = Keys::generate();
2978 let pr_author = Keys::generate();
2979
2980 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
2981 let owner_applied = build_test_status_event(
2982 &repo_owner,
2983 KIND_STATUS_APPLIED,
2984 &pr_event.id.to_hex(),
2985 1_700_100_010,
2986 );
2987 let newer_author_open = build_test_status_event(
2988 &pr_author,
2989 KIND_STATUS_OPEN,
2990 &pr_event.id.to_hex(),
2991 1_700_100_020,
2992 );
2993
2994 let statuses = latest_trusted_pr_status_kinds(
2995 &[pr_event.clone()],
2996 &[owner_applied, newer_author_open],
2997 &repo_owner.public_key().to_hex(),
2998 );
2999
3000 assert_eq!(
3001 statuses.get(&pr_event.id.to_hex()).copied(),
3002 Some(KIND_STATUS_APPLIED),
3003 "owner-applied status should remain authoritative even if author publishes a newer status"
3004 );
3005 }
3006
3007 #[test]
3008 fn test_latest_trusted_pr_status_kinds_ignores_newer_untrusted_status() {
3009 let repo_owner = Keys::generate();
3010 let pr_author = Keys::generate();
3011 let attacker = Keys::generate();
3012
3013 let pr_event = build_test_pr_event(&pr_author, 1_700_100_000);
3014 let trusted_open = build_test_status_event(
3015 &repo_owner,
3016 KIND_STATUS_OPEN,
3017 &pr_event.id.to_hex(),
3018 1_700_100_010,
3019 );
3020 let spoofed_closed = build_test_status_event(
3021 &attacker,
3022 KIND_STATUS_CLOSED,
3023 &pr_event.id.to_hex(),
3024 1_700_100_020,
3025 );
3026
3027 let statuses = latest_trusted_pr_status_kinds(
3028 &[pr_event.clone()],
3029 &[trusted_open, spoofed_closed],
3030 &repo_owner.public_key().to_hex(),
3031 );
3032
3033 assert_eq!(
3034 statuses.get(&pr_event.id.to_hex()).copied(),
3035 Some(KIND_STATUS_OPEN)
3036 );
3037 }
3038}