use std::path::{Path, PathBuf};
use std::process::Command;
use std::time::Duration;
use serde::Serialize;
use void_core::ops::publish::{self, PublishOptions};
use void_core::unixfs::{add_directory as unixfs_add_directory, FsStoreAdapter};
use void_core::workspace::stage::{status_workspace, StatusOptions};
use crate::context::{find_void_dir, load_public_identity, open_repo};
use crate::output::{run_command, CliError, CliOptions};
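/// Build a `void://` identity URI from the locally stored public identity,
/// embedding the Ed25519 signing key, the X25519 recipient key, and the
/// optional Nostr key. Returns `None` if the identity cannot be loaded.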
fn build_identity_uri() -> Option<String> {
let (username, signing_pub, recipient_pub, nostr_pub) = load_public_identity().ok()?;
let signing_hex = hex::encode(signing_pub.as_bytes());
let recipient_hex = hex::encode(recipient_pub.as_bytes());
let mut uri = if let Some(name) = username {
format!(
"void://{}@ed25519:{}/x25519:{}",
name, signing_hex, recipient_hex
)
} else {
format!("void://ed25519:{}/x25519:{}", signing_hex, recipient_hex)
};
if let Some(nostr) = nostr_pub {
uri.push_str(&format!("/nostr:{}", nostr.to_hex()));
}
Some(uri)
}
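/// Derive a pull-request branch name from the current HEAD branch, prefixed
/// with the lowercased username when an identity is available (e.g. `alice/main`).
/// Falls back to "contribution" when HEAD is not a symbolic ref.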
fn derive_pr_branch_name(void_dir: &Path) -> String {
let branch = void_dir
.to_str()
.and_then(|s| {
let utf8 = camino::Utf8Path::new(s);
match void_core::refs::read_head(utf8).ok()? {
Some(void_core::refs::HeadRef::Symbolic(b)) => Some(b),
_ => None,
}
})
.unwrap_or_else(|| "contribution".to_string());
let username = load_public_identity()
.ok()
.and_then(|(name, _, _, _)| name);
match username {
Some(name) => format!("{}/{}", name.to_lowercase(), branch),
None => branch,
}
}
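/// JSON-serializable summary of a `void publish` run.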
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PublishCommandOutput {
pub commit_cid: String,
pub content_key: String,
pub fork_command: String,
pub pull_request_command: String,
pub output_dir: String,
pub text_files: usize,
pub asset_files: usize,
pub excluded_files: usize,
pub cbor_size: u64,
pub total_size: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub gateway_url: Option<String>,
}
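/// Parsed command-line arguments for `void publish`.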
pub struct PublishArgs {
pub commit: Option<String>,
pub output: Option<String>,
pub push: bool,
pub no_identity: bool,
pub contributors: bool,
}
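/// Entry point for `void publish`: extracts repository content into a static
/// site (index.html, content.cbor, and assets), then either writes it to an
/// output directory or, with `--push`, adds it to the local store as a UnixFS
/// directory and reports the gateway URL.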
pub fn run(cwd: &Path, args: PublishArgs, opts: &CliOptions) -> Result<(), CliError> {
run_command("publish", opts, |ctx| {
let void_dir = find_void_dir(cwd)?;
let repo_name = void_dir
.parent()
.and_then(|p| p.file_name())
.map(|n| n.to_string_lossy().to_string());
let identity_uri = if args.no_identity {
None
} else {
build_identity_uri()
};
ctx.progress("Extracting repository content...");
let repo = open_repo(cwd)?;
let publish_opts = PublishOptions {
ctx: repo.context().clone(),
commit_cid: args.commit.clone(),
include_identity: !args.no_identity,
include_contributors: args.contributors,
identity_uri,
repo_name: repo_name.clone(),
html_template: include_str!("../../assets/publish/index.html").to_string(),
};
let result = publish::publish(publish_opts)
.map_err(|e| CliError::internal(format!("publish failed: {e}")))?;
let output_dir = match &args.output {
Some(p) => PathBuf::from(p),
None => cwd.join("_publish"),
};
let mut gateway_url: Option<String> = None;
if args.push {
let temp_dir = tempfile::tempdir()
.map_err(|e| CliError::io_error(format!("failed to create temp dir: {e}")))?;
write_publish_output(&result, temp_dir.path())?;
ctx.progress("Publishing to IPFS...");
let store = repo.context().open_store()
.map_err(|e| CliError::internal(format!("failed to open store: {e}")))?;
let add_result = unixfs_add_directory(&FsStoreAdapter(&store), temp_dir.path())
.map_err(|e| CliError::internal(format!("UnixFS add failed: {e}")))?;
let root_cid = add_result.root_cid.to_string();
gateway_url = Some(format!("https://dweb.link/ipfs/{}/", root_cid));
if !ctx.use_json() {
ctx.info(format!("Published to IPFS: {}", root_cid));
ctx.info(format!(
"Gateway URL: {}",
gateway_url.as_deref().unwrap_or("")
));
}
} else {
write_publish_output(&result, &output_dir)?;
if !ctx.use_json() {
ctx.info(format!("Published to: {}", output_dir.display()));
ctx.info(format!(
" Text files: {} Assets: {} Excluded: {}",
result.stats.text_files,
result.stats.asset_files,
result.stats.excluded_files
));
ctx.info(format!(
" CBOR size: {} bytes Asset size: {} bytes",
result.stats.cbor_size, result.stats.total_asset_size
));
if !result.excluded.is_empty() {
ctx.info(String::new());
ctx.info("Excluded files:");
for exc in &result.excluded {
let reason = match &exc.reason {
publish::ExcludeReason::SecretPattern => "secret pattern",
publish::ExcludeReason::BuildArtifact => "build artifact",
publish::ExcludeReason::BinaryExclusion => "binary format",
};
ctx.info(format!(" {} ({})", exc.path, reason));
}
}
ctx.info(String::new());
ctx.info("To publish to IPFS: void publish --push");
}
}
let total_size =
result.stats.cbor_size + result.stats.total_asset_size + result.index_html.len() as u64;
let fork_command = format!(
"void fork {} --content-key {}",
result.commit_cid, result.content_key
);
let pr_branch = derive_pr_branch_name(&void_dir);
let pull_request_command = format!(
"void pull-request {} --content-key {} --name {}",
result.commit_cid, result.content_key, pr_branch
);
if !ctx.use_json() {
ctx.info(format!("Fork command: {}", fork_command));
ctx.info(format!("PR command: {}", pull_request_command));
}
Ok(PublishCommandOutput {
commit_cid: result.commit_cid.clone(),
content_key: result.content_key,
fork_command,
pull_request_command,
output_dir: output_dir.to_string_lossy().to_string(),
text_files: result.stats.text_files,
asset_files: result.stats.asset_files,
excluded_files: result.stats.excluded_files,
cbor_size: result.stats.cbor_size,
total_size,
gateway_url,
})
})
}
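/// Write the generated site (index.html, content.cbor, and assets) into `dir`,
/// creating parent directories as needed.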
fn write_publish_output(
result: &publish::PublishOutput,
dir: &Path,
) -> Result<(), CliError> {
std::fs::create_dir_all(dir).map_err(|e| CliError::io_error(e.to_string()))?;
std::fs::write(dir.join("index.html"), &result.index_html)
.map_err(|e| CliError::io_error(e.to_string()))?;
std::fs::write(dir.join("content.cbor"), &result.cbor_pack)
.map_err(|e| CliError::io_error(e.to_string()))?;
for asset in &result.assets {
let asset_path = dir.join("assets").join(&asset.repo_path);
if let Some(parent) = asset_path.parent() {
std::fs::create_dir_all(parent).map_err(|e| CliError::io_error(e.to_string()))?;
}
std::fs::write(&asset_path, &asset.content)
.map_err(|e| CliError::io_error(e.to_string()))?;
}
Ok(())
}
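/// Placeholder string in the workspace Cargo.toml that gets swapped for the
/// real IPFS root CID during a crates.io publish run.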
const PUBLISH_CID_PLACEHOLDER: &str = "VOID_PUBLISH_CID";
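/// Base number of seconds to wait for crates.io index propagation between
/// dependency layers and before retries.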
const INDEX_WAIT_SECS: u64 = 30;
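/// Maximum `cargo publish` attempts per crate before giving up.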
const MAX_RETRIES: u32 = 3;
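/// Fail with a conflict error if the working tree has staged or unstaged
/// changes; used to guard `--execute` publishes.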
fn check_working_tree_clean(cwd: &Path) -> Result<(), CliError> {
let repo = open_repo(cwd)?;
let status_opts = StatusOptions {
ctx: repo.context().clone(),
patterns: vec![],
observer: None,
};
let result = status_workspace(status_opts)
.map_err(|e| CliError::internal(format!("failed to check working tree status: {e}")))?;
let has_changes = !result.staged_added.is_empty()
|| !result.staged_modified.is_empty()
|| !result.staged_deleted.is_empty()
|| !result.unstaged_modified.is_empty()
|| !result.unstaged_deleted.is_empty();
if has_changes {
return Err(CliError::conflict(
"working tree has uncommitted changes — commit or stash before publishing",
));
}
Ok(())
}
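/// Check the working tree before a dry run: in JSON mode emit a warning and
/// continue, otherwise ask the user whether to proceed. Returns whether to
/// continue.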
fn warn_if_dirty(cwd: &Path, ctx: &mut crate::output::CommandContext) -> Result<bool, CliError> {
let repo = open_repo(cwd)?;
let status_opts = StatusOptions {
ctx: repo.context().clone(),
patterns: vec![],
observer: None,
};
let result = status_workspace(status_opts)
.map_err(|e| CliError::internal(format!("failed to check working tree status: {e}")))?;
let has_changes = !result.staged_added.is_empty()
|| !result.staged_modified.is_empty()
|| !result.staged_deleted.is_empty()
|| !result.unstaged_modified.is_empty()
|| !result.unstaged_deleted.is_empty();
if !has_changes {
return Ok(true);
}
if ctx.use_json() {
ctx.warn("working tree has uncommitted changes (dry-run, continuing)");
return Ok(true);
}
let proceed = dialoguer::Confirm::new()
.with_prompt("Working tree has uncommitted changes. Continue dry-run?")
.default(false)
.interact()
.map_err(|e| CliError::internal(format!("prompt failed: {e}")))?;
Ok(proceed)
}
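/// Generate the publish site for HEAD and add it to the local store as a
/// UnixFS directory, returning `(commit_cid, ipfs_root_cid)`.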
fn generate_and_push_site(
cwd: &Path,
ctx: &mut crate::output::CommandContext,
) -> Result<(String, String), CliError> {
let void_dir = find_void_dir(cwd)?;
let repo_name = void_dir
.parent()
.and_then(|p| p.file_name())
.map(|n| n.to_string_lossy().to_string());
let identity_uri = build_identity_uri();
ctx.progress("Generating publish site for HEAD...");
let repo = open_repo(cwd)?;
let publish_opts = PublishOptions {
ctx: repo.context().clone(),
commit_cid: None,
include_identity: true,
include_contributors: false,
identity_uri,
repo_name,
html_template: include_str!("../../assets/publish/index.html").to_string(),
};
let result = publish::publish(publish_opts)
.map_err(|e| CliError::internal(format!("publish site generation failed: {e}")))?;
let commit_cid = result.commit_cid.clone();
let temp_dir = tempfile::tempdir()
.map_err(|e| CliError::io_error(format!("failed to create temp dir: {e}")))?;
write_publish_output(&result, temp_dir.path())?;
ctx.progress("Publishing site via UnixFS...");
let store = repo.context().open_store()
.map_err(|e| CliError::internal(format!("failed to open store: {e}")))?;
let add_result = unixfs_add_directory(&FsStoreAdapter(&store), temp_dir.path())
.map_err(|e| CliError::internal(format!("UnixFS add failed: {e}")))?;
let ipfs_root_cid = add_result.root_cid.to_string();
ctx.info(format!("Publish site pushed: {ipfs_root_cid}"));
Ok((commit_cid, ipfs_root_cid))
}
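/// Locate the workspace `Cargo.toml` in the parent directory of the void
/// directory.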
fn find_workspace_cargo_toml(cwd: &Path) -> Result<PathBuf, CliError> {
let void_dir = find_void_dir(cwd)?;
let workspace_root = void_dir
.parent()
.ok_or_else(|| CliError::internal("void_dir has no parent"))?;
let cargo_toml = workspace_root.join("Cargo.toml");
if !cargo_toml.exists() {
return Err(CliError::internal(format!(
"workspace Cargo.toml not found at {}",
cargo_toml.display()
)));
}
Ok(cargo_toml)
}
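/// Replace every occurrence of the publish-CID placeholder in `cargo_toml`
/// with the real IPFS root CID.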
fn swap_publish_cid(cargo_toml: &Path, real_cid: &str) -> Result<(), CliError> {
let content = std::fs::read_to_string(cargo_toml)
.map_err(|e| CliError::io_error(format!("failed to read {}: {e}", cargo_toml.display())))?;
let patched = content.replace(PUBLISH_CID_PLACEHOLDER, real_cid);
std::fs::write(cargo_toml, patched)
.map_err(|e| CliError::io_error(format!("failed to write {}: {e}", cargo_toml.display())))?;
Ok(())
}
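/// Restore the publish-CID placeholder by replacing the real CID back in
/// `cargo_toml`.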
fn revert_publish_cid(cargo_toml: &Path, real_cid: &str) -> Result<(), CliError> {
let content = std::fs::read_to_string(cargo_toml)
.map_err(|e| CliError::io_error(format!("failed to read {}: {e}", cargo_toml.display())))?;
let reverted = content.replace(real_cid, PUBLISH_CID_PLACEHOLDER);
std::fs::write(cargo_toml, reverted)
.map_err(|e| CliError::io_error(format!("failed to write {}: {e}", cargo_toml.display())))?;
Ok(())
}
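/// JSON-serializable summary of a `void publish --crates-io` run.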
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct CratesIoPublishOutput {
pub dry_run: bool,
pub version: String,
pub crates_published: Vec<String>,
pub publish_order: Vec<Vec<String>>,
pub commit_cid: String,
pub ipfs_root_cid: String,
pub repository_url: String,
}
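/// Compute a layered publish order for the workspace from `cargo metadata`:
/// each layer contains crates whose workspace dependencies all sit in earlier
/// layers (a Kahn-style topological sort). Also returns the workspace version,
/// taken from the first member encountered.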
fn resolve_publish_layers() -> Result<(Vec<Vec<String>>, String), CliError> {
let output = Command::new("cargo")
.args(["metadata", "--format-version", "1", "--no-deps"])
.output()
.map_err(|e| CliError::internal(format!("failed to run cargo metadata: {e}")))?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(CliError::internal(format!("cargo metadata failed: {stderr}")));
}
let meta: serde_json::Value = serde_json::from_slice(&output.stdout)
.map_err(|e| CliError::internal(format!("failed to parse cargo metadata: {e}")))?;
let packages = meta["packages"]
.as_array()
.ok_or_else(|| CliError::internal("no packages in cargo metadata".to_string()))?;
// With `--no-deps`, `packages` contains exactly the workspace members, so
// derive the set of workspace crate names directly from it instead of
// parsing the package-ID strings in `workspace_members`, whose format can
// omit the crate name when it matches the directory name.
let workspace_members: std::collections::HashSet<String> = packages
.iter()
.filter_map(|p| p["name"].as_str().map(str::to_string))
.collect();
let mut deps: std::collections::HashMap<String, Vec<String>> = std::collections::HashMap::new();
let mut version = String::new();
for pkg in packages {
let name = pkg["name"].as_str().unwrap_or_default().to_string();
if !workspace_members.contains(&name) {
continue;
}
if version.is_empty() {
if let Some(v) = pkg["version"].as_str() {
version = v.to_string();
}
}
// Collect workspace-internal dependencies. Dev-dependencies are skipped
// (they do not constrain publish order) and the list is de-duplicated so a
// crate listed under several dependency kinds is only counted once when
// computing in-degrees below.
let mut pkg_deps: Vec<String> = pkg["dependencies"]
.as_array()
.map(|arr| {
arr.iter()
.filter_map(|d| {
if d["kind"].as_str() == Some("dev") {
return None;
}
let dep_name = d["name"].as_str()?.to_string();
if dep_name != name && workspace_members.contains(&dep_name) {
Some(dep_name)
} else {
None
}
})
.collect()
})
.unwrap_or_default();
pkg_deps.sort();
pkg_deps.dedup();
deps.insert(name, pkg_deps);
}
if deps.is_empty() {
return Err(CliError::internal("no workspace crates found".to_string()));
}
let mut in_degree: std::collections::HashMap<String, usize> = deps
.iter()
.map(|(k, v)| (k.clone(), v.len()))
.collect();
let mut layers: Vec<Vec<String>> = Vec::new();
let mut remaining: std::collections::HashSet<String> = deps.keys().cloned().collect();
while !remaining.is_empty() {
let mut layer: Vec<String> = remaining
.iter()
.filter(|name| in_degree.get(*name).copied().unwrap_or(0) == 0)
.cloned()
.collect();
if layer.is_empty() {
return Err(CliError::internal(format!(
"circular dependency detected among: {}",
remaining.into_iter().collect::<Vec<_>>().join(", ")
)));
}
layer.sort();
for name in &layer {
remaining.remove(name);
for (other, other_deps) in &deps {
if remaining.contains(other) && other_deps.contains(name) {
if let Some(count) = in_degree.get_mut(other) {
*count = count.saturating_sub(1);
}
}
}
}
layers.push(layer);
}
Ok((layers, version))
}
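/// Run `cargo publish -p <name>` with up to `MAX_RETRIES` attempts, backing
/// off by a multiple of `INDEX_WAIT_SECS` between failures.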
fn publish_one_crate(name: &str, no_verify: bool, ctx: &mut crate::output::CommandContext) -> Result<(), CliError> {
for attempt in 1..=MAX_RETRIES {
ctx.progress(format!("Publishing {name} (attempt {attempt}/{MAX_RETRIES})..."));
let mut cmd = Command::new("cargo");
cmd.args(["publish", "-p", name]);
if no_verify {
cmd.arg("--no-verify");
}
let status = cmd.status().map_err(|e| {
CliError::internal(format!("failed to run cargo: {e}"))
})?;
if status.success() {
ctx.info(format!(" {name} published"));
return Ok(());
}
if attempt < MAX_RETRIES {
let wait = INDEX_WAIT_SECS * attempt as u64;
ctx.warn(format!(
"{name} failed (exit {}), retrying in {wait}s...",
status.code().unwrap_or(-1)
));
std::thread::sleep(Duration::from_secs(wait));
}
}
Err(CliError::internal(format!(
"{name} failed after {MAX_RETRIES} attempts"
)))
}
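/// Entry point for `void publish --crates-io`: pushes the publish site via
/// UnixFS, temporarily swaps the CID placeholder in the workspace Cargo.toml,
/// then publishes the workspace crates to crates.io layer by layer (or prints
/// the plan in dry-run mode), always reverting Cargo.toml afterwards.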
pub fn run_crates_io(execute: bool, no_verify: bool, cwd: &Path, opts: &CliOptions) -> Result<(), CliError> {
run_command("publish --crates-io", opts, |ctx| {
if execute {
check_working_tree_clean(cwd)?;
} else if !warn_if_dirty(cwd, ctx)? {
return Err(CliError::internal("aborted by user"));
}
ctx.progress("Generating publish site and pushing to IPFS...");
let (commit_cid, ipfs_root_cid) = generate_and_push_site(cwd, ctx)?;
let repository_url = format!("https://dweb.link/ipfs/{}/", ipfs_root_cid);
if !ctx.use_json() {
ctx.info(format!("Repository URL: {repository_url}"));
}
let cargo_toml = find_workspace_cargo_toml(cwd)?;
swap_publish_cid(&cargo_toml, &ipfs_root_cid)?;
ctx.info("Swapped VOID_PUBLISH_CID → real CID in Cargo.toml");
ctx.progress("Resolving workspace dependency graph...");
let (layers, version) = resolve_publish_layers()?;
let publish_order: Vec<Vec<String>> = layers.clone();
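// Run the publish flow inside a closure so the Cargo.toml revert below
// always executes, regardless of whether publishing succeeds.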
let publish_result: Result<CratesIoPublishOutput, CliError> = (|| {
if !execute {
ctx.info(format!("=== DRY RUN: v{version} ==="));
ctx.info(String::new());
for (i, layer) in layers.iter().enumerate() {
ctx.info(format!("Layer {i}: {}", layer.join(", ")));
for name in layer {
let mut args = format!("cargo publish -p {name} --dry-run");
if no_verify {
args.push_str(" --no-verify");
}
ctx.info(format!(" {args}"));
let mut cmd = Command::new("cargo");
cmd.args(["publish", "-p", name, "--dry-run"]);
if no_verify {
cmd.arg("--no-verify");
}
let status = cmd.status().map_err(|e| {
CliError::internal(format!("failed to run cargo: {e}"))
})?;
if !status.success() {
ctx.warn(format!(
" {name} dry-run exited with {} (expected for crates with unpublished workspace deps)",
status.code().unwrap_or(-1)
));
}
}
ctx.info(String::new());
}
ctx.info("Pass --execute to publish for real.");
Ok(CratesIoPublishOutput {
dry_run: true,
version,
crates_published: vec![],
publish_order,
commit_cid,
ipfs_root_cid: ipfs_root_cid.clone(),
repository_url,
})
} else {
ctx.info(format!("Publishing workspace v{version} to crates.io"));
ctx.info(String::new());
let mut published = Vec::new();
for (i, layer) in layers.iter().enumerate() {
ctx.info(format!("--- Layer {i} ---"));
for name in layer {
publish_one_crate(name, no_verify, ctx)?;
published.push(name.to_string());
}
if i < layers.len() - 1 {
ctx.progress(format!(
"Waiting {INDEX_WAIT_SECS}s for crates.io index propagation..."
));
std::thread::sleep(Duration::from_secs(INDEX_WAIT_SECS));
}
}
ctx.info(String::new());
ctx.info(format!("=== All crates published (v{version}) ==="));
Ok(CratesIoPublishOutput {
dry_run: false,
version,
crates_published: published,
publish_order,
commit_cid,
ipfs_root_cid: ipfs_root_cid.clone(),
repository_url,
})
}
})();
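// Always restore the placeholder before surfacing the publish result.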
revert_publish_cid(&cargo_toml, &ipfs_root_cid)?;
ctx.info("Reverted Cargo.toml → VOID_PUBLISH_CID");
publish_result
})
}