use std::{io::LineWriter, net::SocketAddr, path::PathBuf, sync::Arc, time::Duration};
use anyhow::Context;
use clap::{Parser, ValueEnum};
use librqbit::{
http_api::{ApiAddTorrentResponse, HttpApi},
http_api_client,
peer_connection::PeerConnectionOptions,
session::{
AddTorrent, AddTorrentOptions, AddTorrentResponse, ListOnlyResponse, Session,
SessionOptions,
},
spawn_utils::{spawn, BlockingSpawner},
torrent_state::ManagedTorrentState,
};
use size_format::SizeFormatterBinary as SF;
use tracing::{error, error_span, info, trace_span, warn};
// Verbosity levels selectable via the `-v` flag; mapped to a default
// RUST_LOG-style directive in `init_logging` (RUST_LOG itself wins).
// Plain `//` comments on purpose: `///` doc comments on ValueEnum
// variants would change clap's generated help text.
#[derive(Debug, Clone, Copy, ValueEnum)]
enum LogLevel {
    Trace,
    Debug,
    Info,
    Warn,
    Error,
}
// Top-level CLI options: global flags plus the subcommand.
// Plain `//` comments on purpose: `///` doc comments on clap-derive
// fields become --help text, which would change program output.
#[derive(Parser)]
#[command(version, author, about)]
struct Opts {
    // Stderr log level; an explicit RUST_LOG env var takes priority.
    #[arg(value_enum, short = 'v')]
    log_level: Option<LogLevel>,
    // Optional file to also write (non-ANSI) logs to.
    #[arg(long = "log-file")]
    log_file: Option<String>,
    // Env-filter directive applied to the log-file layer only.
    #[arg(long = "log-file-rust-log", default_value = "librqbit=trace,info")]
    log_file_rust_log: String,
    // Forced tracker refresh interval, parsed as a human duration (e.g. "30s").
    #[arg(short = 'i', long = "tracker-refresh-interval", value_parser = parse_duration::parse)]
    force_tracker_interval: Option<Duration>,
    // Address the embedded HTTP API listens on.
    #[arg(long = "http-api-listen-addr", default_value = "127.0.0.1:3030")]
    http_api_listen_addr: SocketAddr,
    // Use a current-thread tokio runtime instead of the multi-thread one.
    #[arg(short, long)]
    single_thread_runtime: bool,
    #[arg(long = "disable-dht")]
    disable_dht: bool,
    #[arg(long = "disable-dht-persistence")]
    disable_dht_persistence: bool,
    // Peer TCP connect timeout (human duration string).
    #[arg(long = "peer-connect-timeout", value_parser = parse_duration::parse, default_value="2s")]
    peer_connect_timeout: Duration,
    // Peer socket read/write timeout (human duration string).
    #[arg(long = "peer-read-write-timeout" , value_parser = parse_duration::parse, default_value="10s")]
    peer_read_write_timeout: Duration,
    // Worker threads for the multi-thread runtime; tokio default if unset.
    #[arg(short = 't', long)]
    worker_threads: Option<usize>,
    #[command(subcommand)]
    subcommand: SubCommand,
}
// Options for `server start`.
// (`//` comments used so clap help output stays unchanged.)
#[derive(Parser)]
struct ServerStartOptions {
    // Directory torrents are downloaded into (positional argument).
    output_folder: String,
    #[arg(
        long = "disable-persistence",
        help = "Disable server persistence. It will not read or write its state to disk."
    )]
    disable_persistence: bool,
    // Override for the session persistence file location.
    #[arg(long = "persistence-filename")]
    persistence_filename: Option<String>,
}
// Wrapper for the `server` subcommand's own subcommands.
#[derive(Parser)]
struct ServerOpts {
    #[clap(subcommand)]
    subcommand: ServerSubcommand,
}
// `server` actions; currently only `start` exists.
#[derive(Parser)]
enum ServerSubcommand {
    Start(ServerStartOptions),
}
// Options for the `download` subcommand.
// (`//` comments used so clap help output stays unchanged.)
#[derive(Parser)]
struct DownloadOpts {
    // One or more magnet links / torrent URLs / local file paths.
    torrent_path: Vec<String>,
    // Output folder; required when no running server is found.
    #[arg(short = 'o', long)]
    output_folder: Option<String>,
    // Sub-folder inside the output folder to download into.
    #[arg(short = 's', long)]
    sub_folder: Option<String>,
    // Only download files whose names match this regex.
    #[arg(short = 'r', long = "filename-re")]
    only_files_matching_regex: Option<String>,
    // List the torrent's files and exit without downloading.
    #[arg(short, long)]
    list: bool,
    // Overwrite files that already exist on disk.
    #[arg(long)]
    overwrite: bool,
    // Exit the process once all downloads complete (instead of seeding).
    #[arg(short = 'e', long)]
    exit_on_finish: bool,
}
// Top-level subcommands: run a long-lived server, or download torrents.
#[derive(Parser)]
enum SubCommand {
    Server(ServerOpts),
    Download(DownloadOpts),
}
fn init_logging(opts: &Opts) -> tokio::sync::mpsc::UnboundedSender<String> {
let default_rust_log = match opts.log_level.as_ref() {
Some(level) => match level {
LogLevel::Trace => "trace",
LogLevel::Debug => "debug",
LogLevel::Info => "info",
LogLevel::Warn => "warn",
LogLevel::Error => "error",
},
None => "info",
};
let stderr_filter = match std::env::var("RUST_LOG").ok() {
Some(rust_log) => EnvFilter::builder()
.parse(rust_log)
.expect("can't parse RUST_LOG"),
None => EnvFilter::builder()
.parse(default_rust_log)
.expect("can't parse default_rust_log"),
};
let (stderr_filter, reload_stderr_filter) =
tracing_subscriber::reload::Layer::new(stderr_filter);
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
#[cfg(feature = "tokio-console")]
{
let (console_layer, server) = console_subscriber::Builder::default()
.with_default_env()
.build();
tracing_subscriber::registry()
.with(fmt::layer().with_filter(stderr_filter))
.with(console_layer)
.init();
spawn(
"console_subscriber server",
error_span!("console_subscriber server"),
async move {
server
.serve()
.await
.map_err(|e| anyhow::anyhow!("{:#?}", e))
.context("error running console subscriber server")
},
);
}
#[cfg(not(feature = "tokio-console"))]
{
let layered = tracing_subscriber::registry().with(fmt::layer().with_filter(stderr_filter));
if let Some(log_file) = &opts.log_file {
let log_file = log_file.clone();
let log_file = move || {
LineWriter::new(
std::fs::OpenOptions::new()
.create(true)
.append(true)
.write(true)
.open(&log_file)
.with_context(|| format!("error opening log file {:?}", log_file))
.unwrap(),
)
};
layered
.with(
fmt::layer()
.with_ansi(false)
.with_writer(log_file)
.with_filter(EnvFilter::builder().parse(&opts.log_file_rust_log).unwrap()),
)
.init();
} else {
layered.init();
}
}
let (reload_tx, mut reload_rx) = tokio::sync::mpsc::unbounded_channel::<String>();
spawn(
"fmt_filter_reloader",
error_span!("fmt_filter_reloader"),
async move {
while let Some(rust_log) = reload_rx.recv().await {
let stderr_env_filter = match EnvFilter::builder().parse(&rust_log) {
Ok(f) => f,
Err(e) => {
eprintln!("can't parse env filter {:?}: {:#?}", rust_log, e);
continue;
}
};
eprintln!("setting RUST_LOG to {:?}", rust_log);
let _ = reload_stderr_filter.reload(stderr_env_filter);
}
Ok(())
},
);
reload_tx
}
/// Spawn a background thread that polls parking_lot's deadlock detector
/// every 10 seconds. On the first detection it prints each deadlocked
/// thread's id and backtrace, then aborts the process with exit code 42.
/// (Leading underscore: kept as an opt-in debugging aid.)
fn _start_deadlock_detector_thread() {
    use parking_lot::deadlock;
    use std::thread;
    thread::spawn(|| loop {
        thread::sleep(Duration::from_secs(10));
        let detected = deadlock::check_deadlock();
        if !detected.is_empty() {
            println!("{} deadlocks detected", detected.len());
            for (n, group) in detected.iter().enumerate() {
                println!("Deadlock #{}", n);
                for thread in group {
                    println!("Thread Id {:#?}", thread.thread_id());
                    println!("{:#?}", thread.backtrace());
                }
            }
            // Deliberately kill the process: a deadlocked torrent
            // client cannot make progress anyway.
            std::process::exit(42);
        }
    });
}
/// Binary entry point: parse CLI options, construct the tokio runtime
/// (current-thread or multi-thread per `--single-thread-runtime`), and
/// block on `async_main`.
fn main() -> anyhow::Result<()> {
    let opts = Opts::parse();

    // Idiom fix: the original `match` on a bool (clippy::match_bool)
    // is clearer as if/else. A current-thread runtime pairs with a
    // non-blocking spawner — blocking work would stall the only thread;
    // the multi-thread runtime allows blocking spawns.
    let (mut rt_builder, spawner) = if opts.single_thread_runtime {
        (
            tokio::runtime::Builder::new_current_thread(),
            BlockingSpawner::new(false),
        )
    } else {
        let mut builder = tokio::runtime::Builder::new_multi_thread();
        if let Some(threads) = opts.worker_threads {
            builder.worker_threads(threads);
        }
        (builder, BlockingSpawner::new(true))
    };

    let rt = rt_builder
        .enable_time()
        .enable_io()
        // Cap the blocking pool well below tokio's (much larger) default.
        .max_blocking_threads(8)
        .build()?;

    rt.block_on(async_main(opts, spawner))
}
/// Core async entry point. Builds `SessionOptions` from the CLI flags,
/// then dispatches on the subcommand:
/// - `server start`: run a persistent session plus the HTTP API forever;
/// - `download`: delegate the torrents to an rqbit server already
///   answering on the API address if one exists, otherwise create a
///   local session, add the torrents, and serve its own HTTP API.
async fn async_main(opts: Opts, spawner: BlockingSpawner) -> anyhow::Result<()> {
    let logging_reload_tx = init_logging(&opts);

    let mut sopts = SessionOptions {
        disable_dht: opts.disable_dht,
        disable_dht_persistence: opts.disable_dht_persistence,
        dht_config: None,
        // Persistence stays off except for `server start` (set below).
        persistence: false,
        persistence_filename: None,
        peer_id: None,
        peer_opts: Some(PeerConnectionOptions {
            connect_timeout: Some(opts.peer_connect_timeout),
            read_write_timeout: Some(opts.peer_read_write_timeout),
            ..Default::default()
        }),
    };

    // Background task body: once a second, log progress for every
    // torrent in the session (init percentage, or live download stats).
    let stats_printer = |session: Arc<Session>| async move {
        loop {
            session.with_torrents(|torrents| {
                for (idx, torrent) in torrents {
                    // Extract a live handle (if any) so the heavier
                    // stats formatting runs outside `with_state`.
                    let live = torrent.with_state(|s| {
                        match s {
                            ManagedTorrentState::Initializing(i) => {
                                let total = torrent.get_total_bytes();
                                let progress = i.get_checked_bytes();
                                let pct = (progress as f64 / total as f64) * 100f64;
                                info!("[{}] initializing {:.2}%", idx, pct)
                            },
                            ManagedTorrentState::Live(h) => return Some(h.clone()),
                            _ => {},
                        };
                        None
                    });
                    let handle = match live {
                        Some(live) => live,
                        None => continue
                    };
                    let stats = handle.stats_snapshot();
                    let speed = handle.speed_estimator();
                    let total = stats.total_bytes;
                    let progress = stats.total_bytes - stats.remaining_bytes;
                    // Avoid 0/0 when the torrent is already complete.
                    let downloaded_pct = if stats.remaining_bytes == 0 {
                        100f64
                    } else {
                        (progress as f64 / total as f64) * 100f64
                    };
                    info!(
                        "[{}]: {:.2}% ({:.2}), down speed {:.2} MiB/s, fetched {}, remaining {:.2} of {:.2}, uploaded {:.2}, peers: {{live: {}, connecting: {}, queued: {}, seen: {}, dead: {}}}",
                        idx,
                        downloaded_pct,
                        SF::new(progress),
                        speed.download_mbps(),
                        SF::new(stats.fetched_bytes),
                        SF::new(stats.remaining_bytes),
                        SF::new(total),
                        SF::new(stats.uploaded_bytes),
                        stats.peer_stats.live,
                        stats.peer_stats.connecting,
                        stats.peer_stats.queued,
                        stats.peer_stats.seen,
                        stats.peer_stats.dead,
                    );
                }
            });
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
    };

    match &opts.subcommand {
        SubCommand::Server(server_opts) => match &server_opts.subcommand {
            ServerSubcommand::Start(start_opts) => {
                // Server mode persists session state unless disabled.
                sopts.persistence = !start_opts.disable_persistence;
                sopts.persistence_filename =
                    start_opts.persistence_filename.clone().map(PathBuf::from);
                let session = Session::new_with_opts(
                    PathBuf::from(&start_opts.output_folder),
                    spawner,
                    sopts,
                )
                .await
                .context("error initializing rqbit session")?;
                spawn(
                    "stats_printer",
                    trace_span!("stats_printer"),
                    stats_printer(session.clone()),
                );
                let http_api = HttpApi::new(session, Some(logging_reload_tx));
                let http_api_listen_addr = opts.http_api_listen_addr;
                // Runs until the process is killed; this IS the server.
                http_api
                    .make_http_api_and_run(http_api_listen_addr, false)
                    .await
                    .context("error starting HTTP API")
            }
        },
        SubCommand::Download(download_opts) => {
            if download_opts.torrent_path.is_empty() {
                anyhow::bail!("you must provide at least one URL to download")
            }
            let http_api_url = format!("http://{}", opts.http_api_listen_addr);
            let client = http_api_client::HttpApiClient::new(&http_api_url)?;
            let torrent_opts = AddTorrentOptions {
                only_files_regex: download_opts.only_files_matching_regex.clone(),
                overwrite: download_opts.overwrite,
                list_only: download_opts.list,
                force_tracker_interval: opts.force_tracker_interval,
                output_folder: download_opts.output_folder.clone(),
                sub_folder: download_opts.sub_folder.clone(),
                ..Default::default()
            };
            // Prefer delegating to an rqbit server already listening on
            // the API address; fall back to downloading in-process.
            let connect_to_existing = match client.validate_rqbit_server().await {
                Ok(_) => {
                    info!("Connected to HTTP API at {}, will call it instead of downloading within this process", client.base_url());
                    true
                }
                Err(err) => {
                    warn!("Error checking HTTP API at {}: {:}", client.base_url(), err);
                    false
                }
            };
            if connect_to_existing {
                // Remote path: add each torrent via the HTTP API and
                // report results; per-torrent failures don't abort.
                for torrent_url in &download_opts.torrent_path {
                    match client
                        .add_torrent(
                            AddTorrent::from_cli_argument(torrent_url)?,
                            Some(torrent_opts.clone()),
                        )
                        .await
                    {
                        Ok(ApiAddTorrentResponse { id, details, .. }) => {
                            if let Some(id) = id {
                                info!("{} added to the server with index {}. Query {}/torrents/{}/(stats/haves) for details", details.info_hash, id, http_api_url, id)
                            }
                            for file in details.files {
                                info!(
                                    "file {:?}, size {}{}",
                                    file.name,
                                    SF::new(file.length),
                                    if file.included { "" } else { ", will skip" }
                                )
                            }
                        }
                        Err(err) => warn!("error adding {}: {:?}", torrent_url, err),
                    }
                }
                Ok(())
            } else {
                // In-process path: output folder is mandatory because
                // there is no server-side default to fall back on.
                let session = Session::new_with_opts(
                    download_opts
                        .output_folder
                        .as_ref()
                        .map(PathBuf::from)
                        .context(
                            "output_folder is required if can't connect to an existing server",
                        )?,
                    spawner,
                    sopts,
                )
                .await
                .context("error initializing rqbit session")?;
                spawn(
                    "stats_printer",
                    trace_span!("stats_printer"),
                    stats_printer(session.clone()),
                );
                let http_api = HttpApi::new(session.clone(), Some(logging_reload_tx));
                let http_api_listen_addr = opts.http_api_listen_addr;
                // Serve the API in the background while downloading.
                // NOTE(review): the boolean arg here is `true` vs the
                // server branch's `false`; its meaning is defined in
                // librqbit — confirm before relying on it.
                spawn(
                    "http_api",
                    error_span!("http_api"),
                    http_api
                        .clone()
                        .make_http_api_and_run(http_api_listen_addr, true),
                );
                let mut added = false;
                let mut handles = Vec::new();
                for path in &download_opts.torrent_path {
                    let handle = match session
                        .add_torrent(
                            AddTorrent::from_cli_argument(path)?,
                            Some(torrent_opts.clone()),
                        )
                        .await
                    {
                        Ok(v) => match v {
                            AddTorrentResponse::AlreadyManaged(id, handle) => {
                                info!(
                                    "torrent {:?} is already managed, id={}, downloaded to {:?}",
                                    handle.info_hash(),
                                    id,
                                    handle.info().out_dir
                                );
                                continue;
                            }
                            AddTorrentResponse::ListOnly(ListOnlyResponse {
                                info_hash: _,
                                info,
                                only_files,
                                ..
                            }) => {
                                // --list mode: print the file table and
                                // skip downloading this torrent.
                                for (idx, (filename, len)) in
                                    info.iter_filenames_and_lengths()?.enumerate()
                                {
                                    let included = match &only_files {
                                        Some(files) => files.contains(&idx),
                                        None => true,
                                    };
                                    info!(
                                        "File {}, size {}{}",
                                        filename.to_string()?,
                                        SF::new(len),
                                        if included { "" } else { ", will skip" }
                                    )
                                }
                                continue;
                            }
                            AddTorrentResponse::Added(_, handle) => {
                                added = true;
                                handle
                            }
                        },
                        Err(err) => {
                            // One bad torrent shouldn't abort the rest.
                            error!("error adding {:?}: {:?}", &path, err);
                            continue;
                        }
                    };
                    handles.push(handle);
                }
                if download_opts.list {
                    Ok(())
                } else if added {
                    if download_opts.exit_on_finish {
                        let results = futures::future::join_all(
                            handles.iter().map(|h| h.wait_until_completed()),
                        )
                        .await;
                        if results.iter().any(|r| r.is_err()) {
                            anyhow::bail!("some downloads failed")
                        }
                        info!("All downloads completed, exiting");
                        Ok(())
                    } else {
                        // Keep the process (downloads/seeding + API) alive.
                        loop {
                            tokio::time::sleep(Duration::from_secs(60)).await;
                        }
                    }
                } else {
                    anyhow::bail!("no torrents were added")
                }
            }
        }
    }
}