pub mod auth;
pub mod databases;
pub mod error;
pub mod services;
pub mod statistics;
pub mod torrent;
pub mod peer_tests;
use std::cmp::max;
use std::collections::HashMap;
use std::net::IpAddr;
use std::panic::Location;
use std::sync::Arc;
use std::time::Duration;
use auth::PeerKey;
use databases::driver::Driver;
use derive_more::Constructor;
use error::PeerKeyError;
use tokio::sync::mpsc::error::SendError;
use torrust_tracker_clock::clock::Time;
use torrust_tracker_configuration::v2_0_0::database;
use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT};
use torrust_tracker_located_error::Located;
use torrust_tracker_primitives::info_hash::InfoHash;
use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch};
use torrust_tracker_torrent_repository::entry::EntrySync;
use torrust_tracker_torrent_repository::repository::Repository;
use self::auth::Key;
use self::error::Error;
use self::torrent::Torrents;
use crate::core::databases::Database;
use crate::CurrentClock;
/// The core tracker service.
///
/// Holds the tracker configuration, the persistence layer, and the in-memory
/// repositories for torrents/peers, authentication keys and the torrent
/// whitelist.
pub struct Tracker {
// Core tracker configuration (cloned from the config passed to `new`).
config: Core,
// Persistence layer; the concrete driver (SQLite3/MySQL) is chosen in `new`.
database: Arc<Box<dyn Database>>,
// In-memory authentication keys indexed by key; used when `config.private`.
keys: tokio::sync::RwLock<std::collections::HashMap<Key, auth::PeerKey>>,
// In-memory torrent whitelist; used when `config.listed`.
whitelist: tokio::sync::RwLock<std::collections::HashSet<InfoHash>>,
// In-memory repository of torrents and their swarms.
torrents: Arc<Torrents>,
// Optional channel to emit statistics events (None disables event sending).
stats_event_sender: Option<Box<dyn statistics::EventSender>>,
// Repository holding the collected statistics metrics.
stats_repository: statistics::Repo,
}
/// The data returned to the peer that sent an announce request.
#[derive(Clone, Debug, PartialEq, Constructor, Default)]
pub struct AnnounceData {
/// The peers in the swarm (the announcing peer itself is excluded).
pub peers: Vec<Arc<peer::Peer>>,
/// The swarm metadata (complete/downloaded/incomplete counters).
pub stats: SwarmMetadata,
/// The tracker announce policy (intervals the client should honour).
pub policy: AnnouncePolicy,
}
/// How many peers the announcing client wants in the announce response.
#[derive(Clone, Debug, PartialEq, Default)]
pub enum PeersWanted {
/// The client accepts as many peers as the tracker allows.
#[default]
All,
/// The client wants at most `amount` peers.
Only { amount: usize },
}
impl PeersWanted {
    /// Builds a [`PeersWanted::Only`] from a `numwant`-style value.
    ///
    /// If the value cannot be represented as a `usize`, the global
    /// `TORRENT_PEERS_LIMIT` is used instead.
    #[must_use]
    pub fn only(limit: u32) -> Self {
        let amount = usize::try_from(limit).unwrap_or(TORRENT_PEERS_LIMIT);
        Self::Only { amount }
    }
    /// The number of peers the request is asking for, defaulting to the
    /// global `TORRENT_PEERS_LIMIT` when no explicit amount was given.
    fn limit(&self) -> usize {
        if let PeersWanted::Only { amount } = self {
            *amount
        } else {
            TORRENT_PEERS_LIMIT
        }
    }
}
impl From<i32> for PeersWanted {
    /// Converts an announce-request `numwant` value into a [`PeersWanted`].
    ///
    /// Zero, negative, and non-convertible values all mean "no preference",
    /// i.e. [`PeersWanted::All`].
    fn from(value: i32) -> Self {
        if value <= 0 {
            return Self::All;
        }
        match usize::try_from(value) {
            Ok(amount) => Self::Only { amount },
            Err(_) => Self::All,
        }
    }
}
/// The data returned to the peer that sent a scrape request.
#[derive(Debug, PartialEq, Default)]
pub struct ScrapeData {
/// Swarm metadata for each requested torrent, indexed by info hash.
pub files: HashMap<InfoHash, SwarmMetadata>,
}
impl ScrapeData {
    /// Creates an empty `ScrapeData` containing no files.
    #[must_use]
    pub fn empty() -> Self {
        let files: HashMap<InfoHash, SwarmMetadata> = HashMap::new();
        Self { files }
    }
    /// Creates a `ScrapeData` with zeroed metadata for each given info hash.
    ///
    /// Takes a slice (clippy `ptr_arg`): existing `&Vec<InfoHash>` callers
    /// keep working through deref coercion.
    #[must_use]
    pub fn zeroed(info_hashes: &[InfoHash]) -> Self {
        let mut scrape_data = Self::empty();
        for info_hash in info_hashes {
            scrape_data.add_file(info_hash, SwarmMetadata::zeroed());
        }
        scrape_data
    }
    /// Adds (or replaces) the swarm metadata for a file.
    pub fn add_file(&mut self, info_hash: &InfoHash, swarm_metadata: SwarmMetadata) {
        self.files.insert(*info_hash, swarm_metadata);
    }
    /// Adds a file with zeroed swarm metadata.
    pub fn add_file_with_zeroed_metadata(&mut self, info_hash: &InfoHash) {
        self.files.insert(*info_hash, SwarmMetadata::zeroed());
    }
}
/// A request to register an authentication key.
///
/// Both fields are optional: a missing key means "generate a random one"; a
/// missing lifetime means the key is permanent.
#[derive(Debug)]
pub struct AddKeyRequest {
/// The pre-generated key to register; `None` to generate a random key.
pub opt_key: Option<String>,
/// Key lifetime in seconds; `None` for a permanent key.
pub opt_seconds_valid: Option<u64>,
}
impl Tracker {
pub fn new(
config: &Core,
stats_event_sender: Option<Box<dyn statistics::EventSender>>,
stats_repository: statistics::Repo,
) -> Result<Tracker, databases::error::Error> {
let driver = match config.database.driver {
database::Driver::Sqlite3 => Driver::Sqlite3,
database::Driver::MySQL => Driver::MySQL,
};
let database = Arc::new(databases::driver::build(&driver, &config.database.path)?);
Ok(Tracker {
config: config.clone(),
keys: tokio::sync::RwLock::new(std::collections::HashMap::new()),
whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()),
torrents: Arc::default(),
stats_event_sender,
stats_repository,
database,
})
}
pub fn is_public(&self) -> bool {
!self.config.private
}
pub fn is_private(&self) -> bool {
self.config.private
}
pub fn is_listed(&self) -> bool {
self.config.listed
}
pub fn requires_authentication(&self) -> bool {
self.is_private()
}
pub fn is_behind_reverse_proxy(&self) -> bool {
self.config.net.on_reverse_proxy
}
pub fn get_announce_policy(&self) -> AnnouncePolicy {
self.config.announce_policy
}
pub fn get_maybe_external_ip(&self) -> Option<IpAddr> {
self.config.net.external_ip
}
pub fn announce(
&self,
info_hash: &InfoHash,
peer: &mut peer::Peer,
remote_client_ip: &IpAddr,
peers_wanted: &PeersWanted,
) -> AnnounceData {
tracing::debug!("Before: {peer:?}");
peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip));
tracing::debug!("After: {peer:?}");
let stats = self.upsert_peer_and_get_stats(info_hash, peer);
let peers = self.get_peers_for(info_hash, peer, peers_wanted.limit());
AnnounceData {
peers,
stats,
policy: self.get_announce_policy(),
}
}
pub async fn scrape(&self, info_hashes: &Vec<InfoHash>) -> ScrapeData {
let mut scrape_data = ScrapeData::empty();
for info_hash in info_hashes {
let swarm_metadata = match self.authorize(info_hash).await {
Ok(()) => self.get_swarm_metadata(info_hash),
Err(_) => SwarmMetadata::zeroed(),
};
scrape_data.add_file(info_hash, swarm_metadata);
}
scrape_data
}
fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata {
match self.torrents.get(info_hash) {
Some(torrent_entry) => torrent_entry.get_swarm_metadata(),
None => SwarmMetadata::default(),
}
}
pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> {
let persistent_torrents = self.database.load_persistent_torrents()?;
self.torrents.import_persistent(&persistent_torrents);
Ok(())
}
fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec<Arc<peer::Peer>> {
match self.torrents.get(info_hash) {
None => vec![],
Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))),
}
}
pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec<Arc<peer::Peer>> {
match self.torrents.get(info_hash) {
None => vec![],
Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)),
}
}
pub fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata {
let swarm_metadata_before = match self.torrents.get_swarm_metadata(info_hash) {
Some(swarm_metadata) => swarm_metadata,
None => SwarmMetadata::zeroed(),
};
self.torrents.upsert_peer(info_hash, peer);
let swarm_metadata_after = match self.torrents.get_swarm_metadata(info_hash) {
Some(swarm_metadata) => swarm_metadata,
None => SwarmMetadata::zeroed(),
};
if swarm_metadata_before != swarm_metadata_after {
self.persist_stats(info_hash, &swarm_metadata_after);
}
swarm_metadata_after
}
fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) {
if self.config.tracker_policy.persistent_torrent_completed_stat {
let completed = swarm_metadata.downloaded;
let info_hash = *info_hash;
drop(self.database.save_persistent_torrent(&info_hash, completed));
}
}
pub fn get_torrents_metrics(&self) -> TorrentsMetrics {
self.torrents.get_metrics()
}
pub fn cleanup_torrents(&self) {
let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout)))
.unwrap_or_default();
self.torrents.remove_inactive_peers(current_cutoff);
if self.config.tracker_policy.remove_peerless_torrents {
self.torrents.remove_peerless_torrents(&self.config.tracker_policy);
}
}
pub async fn authenticate(&self, key: &Key) -> Result<(), auth::Error> {
if self.is_private() {
self.verify_auth_key(key).await
} else {
Ok(())
}
}
pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result<auth::PeerKey, PeerKeyError> {
match add_key_req.opt_key {
Some(pre_existing_key) => {
if let Some(seconds_valid) = add_key_req.opt_seconds_valid {
let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else {
return Err(PeerKeyError::DurationOverflow { seconds_valid });
};
let key = pre_existing_key.parse::<Key>();
match key {
Ok(key) => match self.add_auth_key(key, Some(valid_until)).await {
Ok(auth_key) => Ok(auth_key),
Err(err) => Err(PeerKeyError::DatabaseError {
source: Located(err).into(),
}),
},
Err(err) => Err(PeerKeyError::InvalidKey {
key: pre_existing_key,
source: Located(err).into(),
}),
}
} else {
let key = pre_existing_key.parse::<Key>();
match key {
Ok(key) => match self.add_permanent_auth_key(key).await {
Ok(auth_key) => Ok(auth_key),
Err(err) => Err(PeerKeyError::DatabaseError {
source: Located(err).into(),
}),
},
Err(err) => Err(PeerKeyError::InvalidKey {
key: pre_existing_key,
source: Located(err).into(),
}),
}
}
}
None => match add_key_req.opt_seconds_valid {
Some(seconds_valid) => match self.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await {
Ok(auth_key) => Ok(auth_key),
Err(err) => Err(PeerKeyError::DatabaseError {
source: Located(err).into(),
}),
},
None => match self.generate_permanent_auth_key().await {
Ok(auth_key) => Ok(auth_key),
Err(err) => Err(PeerKeyError::DatabaseError {
source: Located(err).into(),
}),
},
},
}
}
pub async fn generate_permanent_auth_key(&self) -> Result<auth::PeerKey, databases::error::Error> {
self.generate_auth_key(None).await
}
pub async fn generate_auth_key(&self, lifetime: Option<Duration>) -> Result<auth::PeerKey, databases::error::Error> {
let auth_key = auth::generate_key(lifetime);
self.database.add_key_to_keys(&auth_key)?;
self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone());
Ok(auth_key)
}
pub async fn add_permanent_auth_key(&self, key: Key) -> Result<auth::PeerKey, databases::error::Error> {
self.add_auth_key(key, None).await
}
pub async fn add_auth_key(
&self,
key: Key,
valid_until: Option<DurationSinceUnixEpoch>,
) -> Result<auth::PeerKey, databases::error::Error> {
let auth_key = PeerKey { key, valid_until };
self.database.add_key_to_keys(&auth_key)?;
self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone());
Ok(auth_key)
}
pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> {
self.database.remove_key_from_keys(key)?;
self.keys.write().await.remove(key);
Ok(())
}
async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> {
match self.keys.read().await.get(key) {
None => Err(auth::Error::UnableToReadKey {
location: Location::caller(),
key: Box::new(key.clone()),
}),
Some(key) => match self.config.private_mode {
Some(private_mode) => {
if private_mode.check_keys_expiration {
return auth::verify_key_expiration(key);
}
Ok(())
}
None => auth::verify_key_expiration(key),
},
}
}
pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> {
let keys_from_database = self.database.load_keys()?;
let mut keys = self.keys.write().await;
keys.clear();
for key in keys_from_database {
keys.insert(key.key.clone(), key);
}
Ok(())
}
pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> {
if !self.is_listed() {
return Ok(());
}
if self.is_info_hash_whitelisted(info_hash).await {
return Ok(());
}
Err(Error::TorrentNotWhitelisted {
info_hash: *info_hash,
location: Location::caller(),
})
}
pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> {
self.add_torrent_to_database_whitelist(info_hash)?;
self.add_torrent_to_memory_whitelist(info_hash).await;
Ok(())
}
fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> {
let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?;
if is_whitelisted {
return Ok(());
}
self.database.add_info_hash_to_whitelist(*info_hash)?;
Ok(())
}
pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool {
self.whitelist.write().await.insert(*info_hash)
}
pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> {
self.remove_torrent_from_database_whitelist(info_hash)?;
self.remove_torrent_from_memory_whitelist(info_hash).await;
Ok(())
}
pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> {
let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?;
if !is_whitelisted {
return Ok(());
}
self.database.remove_info_hash_from_whitelist(*info_hash)?;
Ok(())
}
pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool {
self.whitelist.write().await.remove(info_hash)
}
pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool {
self.whitelist.read().await.contains(info_hash)
}
pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> {
let whitelisted_torrents_from_database = self.database.load_whitelist()?;
let mut whitelist = self.whitelist.write().await;
whitelist.clear();
for info_hash in whitelisted_torrents_from_database {
let _: bool = whitelist.insert(info_hash);
}
Ok(())
}
pub async fn get_stats(&self) -> tokio::sync::RwLockReadGuard<'_, statistics::Metrics> {
self.stats_repository.get_stats().await
}
pub async fn send_stats_event(&self, event: statistics::Event) -> Option<Result<(), SendError<statistics::Event>>> {
match &self.stats_event_sender {
None => None,
Some(stats_event_sender) => stats_event_sender.send_event(event).await,
}
}
pub fn drop_database_tables(&self) -> Result<(), databases::error::Error> {
self.database.drop_database_tables()
}
}
#[must_use]
fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option<IpAddr>) -> IpAddr {
    // When the client connects from a loopback address (e.g. tracker and
    // client on the same host) and the tracker has a configured external IP,
    // advertise the external IP instead of the useless loopback address.
    match tracker_external_ip {
        Some(external_ip) if remote_client_ip.is_loopback() => external_ip,
        _ => *remote_client_ip,
    }
}
#[cfg(test)]
mod tests {
mod the_tracker {
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::str::FromStr;
use std::sync::Arc;
use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId};
use torrust_tracker_configuration::TORRENT_PEERS_LIMIT;
use torrust_tracker_primitives::info_hash::InfoHash;
use torrust_tracker_primitives::DurationSinceUnixEpoch;
use torrust_tracker_test_helpers::configuration;
use crate::core::peer::Peer;
use crate::core::services::tracker_factory;
use crate::core::{TorrentsMetrics, Tracker};
use crate::shared::bit_torrent::info_hash::fixture::gen_seeded_infohash;
fn public_tracker() -> Tracker {
tracker_factory(&configuration::ephemeral_public())
}
fn private_tracker() -> Tracker {
tracker_factory(&configuration::ephemeral_private())
}
fn whitelisted_tracker() -> Tracker {
tracker_factory(&configuration::ephemeral_listed())
}
pub fn tracker_persisting_torrents_in_database() -> Tracker {
let mut configuration = configuration::ephemeral();
configuration.core.tracker_policy.persistent_torrent_completed_stat = true;
tracker_factory(&configuration)
}
fn sample_info_hash() -> InfoHash {
"3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::<InfoHash>().unwrap()
}
fn peer_ip() -> IpAddr {
IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap())
}
fn sample_peer() -> Peer {
complete_peer()
}
fn sample_peer_1() -> Peer {
Peer {
peer_id: PeerId(*b"-qB00000000000000001"),
peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081),
updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
uploaded: NumberOfBytes::new(0),
downloaded: NumberOfBytes::new(0),
left: NumberOfBytes::new(0),
event: AnnounceEvent::Completed,
}
}
fn sample_peer_2() -> Peer {
Peer {
peer_id: PeerId(*b"-qB00000000000000002"),
peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082),
updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
uploaded: NumberOfBytes::new(0),
downloaded: NumberOfBytes::new(0),
left: NumberOfBytes::new(0),
event: AnnounceEvent::Completed,
}
}
fn seeder() -> Peer {
complete_peer()
}
fn leecher() -> Peer {
incomplete_peer()
}
fn started_peer() -> Peer {
incomplete_peer()
}
fn completed_peer() -> Peer {
complete_peer()
}
fn complete_peer() -> Peer {
Peer {
peer_id: PeerId(*b"-qB00000000000000000"),
peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080),
updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
uploaded: NumberOfBytes::new(0),
downloaded: NumberOfBytes::new(0),
left: NumberOfBytes::new(0), event: AnnounceEvent::Completed,
}
}
fn incomplete_peer() -> Peer {
Peer {
peer_id: PeerId(*b"-qB00000000000000000"),
peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080),
updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
uploaded: NumberOfBytes::new(0),
downloaded: NumberOfBytes::new(0),
left: NumberOfBytes::new(1000), event: AnnounceEvent::Started,
}
}
#[tokio::test]
async fn should_collect_torrent_metrics() {
let tracker = public_tracker();
let torrents_metrics = tracker.get_torrents_metrics();
assert_eq!(
torrents_metrics,
TorrentsMetrics {
complete: 0,
downloaded: 0,
incomplete: 0,
torrents: 0
}
);
}
#[tokio::test]
async fn it_should_return_the_peers_for_a_given_torrent() {
let tracker = public_tracker();
let info_hash = sample_info_hash();
let peer = sample_peer();
tracker.upsert_peer_and_get_stats(&info_hash, &peer);
let peers = tracker.get_torrent_peers(&info_hash);
assert_eq!(peers, vec![Arc::new(peer)]);
}
fn numeric_peer_id(two_digits_value: i32) -> PeerId {
let idx_str = format!("{two_digits_value:02}");
let base = b"-qB00000000000000000";
let mut peer_id_bytes = [0u8; 20];
peer_id_bytes[..base.len()].copy_from_slice(base);
peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes());
PeerId(peer_id_bytes)
}
#[tokio::test]
async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() {
let tracker = public_tracker();
let info_hash = sample_info_hash();
for idx in 1..=75 {
let peer = Peer {
peer_id: numeric_peer_id(idx),
peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080),
updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
uploaded: NumberOfBytes::new(0),
downloaded: NumberOfBytes::new(0),
left: NumberOfBytes::new(0), event: AnnounceEvent::Completed,
};
tracker.upsert_peer_and_get_stats(&info_hash, &peer);
}
let peers = tracker.get_torrent_peers(&info_hash);
assert_eq!(peers.len(), 74);
}
#[tokio::test]
async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() {
let tracker = public_tracker();
let info_hash = sample_info_hash();
let peer = sample_peer();
tracker.upsert_peer_and_get_stats(&info_hash, &peer);
let peers = tracker.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT);
assert_eq!(peers, vec![]);
}
#[tokio::test]
async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() {
let tracker = public_tracker();
let info_hash = sample_info_hash();
let excluded_peer = sample_peer();
tracker.upsert_peer_and_get_stats(&info_hash, &excluded_peer);
for idx in 2..=75 {
let peer = Peer {
peer_id: numeric_peer_id(idx),
peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080),
updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
uploaded: NumberOfBytes::new(0),
downloaded: NumberOfBytes::new(0),
left: NumberOfBytes::new(0), event: AnnounceEvent::Completed,
};
tracker.upsert_peer_and_get_stats(&info_hash, &peer);
}
let peers = tracker.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT);
assert_eq!(peers.len(), 74);
}
#[tokio::test]
async fn it_should_return_the_torrent_metrics() {
let tracker = public_tracker();
tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher());
let torrent_metrics = tracker.get_torrents_metrics();
assert_eq!(
torrent_metrics,
TorrentsMetrics {
complete: 0,
downloaded: 0,
incomplete: 1,
torrents: 1,
}
);
}
#[tokio::test]
async fn it_should_get_many_the_torrent_metrics() {
let tracker = public_tracker();
let start_time = std::time::Instant::now();
for i in 0..1_000_000 {
tracker.upsert_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher());
}
let result_a = start_time.elapsed();
let start_time = std::time::Instant::now();
let torrent_metrics = tracker.get_torrents_metrics();
let result_b = start_time.elapsed();
assert_eq!(
(torrent_metrics),
(TorrentsMetrics {
complete: 0,
downloaded: 0,
incomplete: 1_000_000,
torrents: 1_000_000,
}),
"{result_a:?} {result_b:?}"
);
}
mod for_all_config_modes {
mod handling_an_announce_request {
use std::sync::Arc;
use crate::core::tests::the_tracker::{
peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2,
};
use crate::core::PeersWanted;
mod should_assign_the_ip_to_the_peer {
use std::net::{IpAddr, Ipv4Addr};
use crate::core::assign_ip_address_to_peer;
#[test]
fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() {
let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2));
let peer_ip = assign_ip_address_to_peer(&remote_ip, None);
assert_eq!(peer_ip, remote_ip);
}
mod and_when_the_client_ip_is_a_ipv4_loopback_ip {
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::str::FromStr;
use crate::core::assign_ip_address_to_peer;
#[test]
fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() {
let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST);
let peer_ip = assign_ip_address_to_peer(&remote_ip, None);
assert_eq!(peer_ip, remote_ip);
}
#[test]
fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() {
let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST);
let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap());
let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip));
assert_eq!(peer_ip, tracker_external_ip);
}
#[test]
fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip(
) {
let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST);
let tracker_external_ip =
IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap());
let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip));
assert_eq!(peer_ip, tracker_external_ip);
}
}
mod and_when_client_ip_is_a_ipv6_loopback_ip {
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::str::FromStr;
use crate::core::assign_ip_address_to_peer;
#[test]
fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() {
let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST);
let peer_ip = assign_ip_address_to_peer(&remote_ip, None);
assert_eq!(peer_ip, remote_ip);
}
#[test]
fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() {
let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST);
let tracker_external_ip =
IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap());
let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip));
assert_eq!(peer_ip, tracker_external_ip);
}
#[test]
fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip(
) {
let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST);
let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap());
let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip));
assert_eq!(peer_ip, tracker_external_ip);
}
}
}
#[tokio::test]
async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() {
let tracker = public_tracker();
let mut peer = sample_peer();
let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All);
assert_eq!(announce_data.peers, vec![]);
}
#[tokio::test]
async fn it_should_return_the_announce_data_with_the_previously_announced_peers() {
let tracker = public_tracker();
let mut previously_announced_peer = sample_peer_1();
tracker.announce(
&sample_info_hash(),
&mut previously_announced_peer,
&peer_ip(),
&PeersWanted::All,
);
let mut peer = sample_peer_2();
let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All);
assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]);
}
mod it_should_update_the_swarm_stats_for_the_torrent {
use crate::core::tests::the_tracker::{
completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer,
};
use crate::core::PeersWanted;
#[tokio::test]
async fn when_the_peer_is_a_seeder() {
let tracker = public_tracker();
let mut peer = seeder();
let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All);
assert_eq!(announce_data.stats.complete, 1);
}
#[tokio::test]
async fn when_the_peer_is_a_leecher() {
let tracker = public_tracker();
let mut peer = leecher();
let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All);
assert_eq!(announce_data.stats.incomplete, 1);
}
#[tokio::test]
async fn when_a_previously_announced_started_peer_has_completed_downloading() {
let tracker = public_tracker();
let mut started_peer = started_peer();
tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip(), &PeersWanted::All);
let mut completed_peer = completed_peer();
let announce_data =
tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip(), &PeersWanted::All);
assert_eq!(announce_data.stats.downloaded, 1);
}
}
}
mod handling_a_scrape_request {
use std::net::{IpAddr, Ipv4Addr};
use torrust_tracker_primitives::info_hash::InfoHash;
use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker};
use crate::core::{PeersWanted, ScrapeData, SwarmMetadata};
#[tokio::test]
async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent(
) {
let tracker = public_tracker();
let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::<InfoHash>().unwrap()];
let scrape_data = tracker.scrape(&info_hashes).await;
let mut expected_scrape_data = ScrapeData::empty();
expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]);
assert_eq!(scrape_data, expected_scrape_data);
}
#[tokio::test]
async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() {
let tracker = public_tracker();
let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::<InfoHash>().unwrap();
let mut complete_peer = complete_peer();
tracker.announce(
&info_hash,
&mut complete_peer,
&IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)),
&PeersWanted::All,
);
let mut incomplete_peer = incomplete_peer();
tracker.announce(
&info_hash,
&mut incomplete_peer,
&IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)),
&PeersWanted::All,
);
let scrape_data = tracker.scrape(&vec![info_hash]).await;
let mut expected_scrape_data = ScrapeData::empty();
expected_scrape_data.add_file(
&info_hash,
SwarmMetadata {
complete: 0, downloaded: 0,
incomplete: 1, },
);
assert_eq!(scrape_data, expected_scrape_data);
}
#[tokio::test]
async fn it_should_allow_scraping_for_multiple_torrents() {
let tracker = public_tracker();
let info_hashes = vec![
"3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::<InfoHash>().unwrap(),
"99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::<InfoHash>().unwrap(),
];
let scrape_data = tracker.scrape(&info_hashes).await;
let mut expected_scrape_data = ScrapeData::empty();
expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]);
expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[1]);
assert_eq!(scrape_data, expected_scrape_data);
}
}
}
mod configured_as_whitelisted {
mod handling_authorization {
use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker};
#[tokio::test]
async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() {
let tracker = whitelisted_tracker();
let info_hash = sample_info_hash();
let result = tracker.add_torrent_to_whitelist(&info_hash).await;
assert!(result.is_ok());
let result = tracker.authorize(&info_hash).await;
assert!(result.is_ok());
}
#[tokio::test]
async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() {
let tracker = whitelisted_tracker();
let info_hash = sample_info_hash();
let result = tracker.authorize(&info_hash).await;
assert!(result.is_err());
}
}
mod handling_the_torrent_whitelist {
use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker};
#[tokio::test]
async fn it_should_add_a_torrent_to_the_whitelist() {
let tracker = whitelisted_tracker();
let info_hash = sample_info_hash();
tracker.add_torrent_to_whitelist(&info_hash).await.unwrap();
assert!(tracker.is_info_hash_whitelisted(&info_hash).await);
}
#[tokio::test]
async fn it_should_remove_a_torrent_from_the_whitelist() {
let tracker = whitelisted_tracker();
let info_hash = sample_info_hash();
tracker.add_torrent_to_whitelist(&info_hash).await.unwrap();
tracker.remove_torrent_from_whitelist(&info_hash).await.unwrap();
assert!(!tracker.is_info_hash_whitelisted(&info_hash).await);
}
mod persistence {
use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker};
#[tokio::test]
async fn it_should_load_the_whitelist_from_the_database() {
let tracker = whitelisted_tracker();
let info_hash = sample_info_hash();
tracker.add_torrent_to_whitelist(&info_hash).await.unwrap();
tracker.whitelist.write().await.remove(&info_hash);
assert!(!tracker.is_info_hash_whitelisted(&info_hash).await);
tracker.load_whitelist_from_database().await.unwrap();
assert!(tracker.is_info_hash_whitelisted(&info_hash).await);
}
}
}
mod handling_an_announce_request {}
mod handling_an_scrape_request {
use torrust_tracker_primitives::info_hash::InfoHash;
use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
use crate::core::tests::the_tracker::{
complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker,
};
use crate::core::{PeersWanted, ScrapeData};
#[test]
fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() {
let sample_info_hash = sample_info_hash();
let mut expected_scrape_data = ScrapeData::empty();
expected_scrape_data.add_file_with_zeroed_metadata(&sample_info_hash);
assert_eq!(ScrapeData::zeroed(&vec![sample_info_hash]), expected_scrape_data);
}
#[tokio::test]
async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() {
let tracker = whitelisted_tracker();
let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::<InfoHash>().unwrap();
let mut peer = incomplete_peer();
tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All);
let mut peer = complete_peer();
tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All);
let scrape_data = tracker.scrape(&vec![info_hash]).await;
let mut expected_scrape_data = ScrapeData::empty();
expected_scrape_data.add_file(&info_hash, SwarmMetadata::zeroed());
assert_eq!(scrape_data, expected_scrape_data);
}
}
}
// Tracker configured in `private` mode: peers must present a registered
// authentication key. These tests cover key generation, registration,
// removal, expiration handling and persistence of keys in the database.
mod configured_as_private {
mod handling_authentication {
use std::str::FromStr;
use std::time::Duration;
use crate::core::auth::{self};
use crate::core::tests::the_tracker::private_tracker;
// A syntactically valid key that was never registered must not authenticate.
#[tokio::test]
async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() {
let tracker = private_tracker();
let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap();
let result = tracker.authenticate(&unregistered_key).await;
assert!(result.is_err());
}
// Direct key verification must also reject unregistered keys.
#[tokio::test]
async fn it_should_fail_verifying_an_unregistered_authentication_key() {
let tracker = private_tracker();
let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap();
assert!(tracker.verify_auth_key(&unregistered_key).await.is_err());
}
// A removed key must stop verifying even though it was previously valid.
#[tokio::test]
async fn it_should_remove_an_authentication_key() {
let tracker = private_tracker();
let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap();
let result = tracker.remove_auth_key(&expiring_key.key()).await;
assert!(result.is_ok());
assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_err());
}
// Keys dropped from the in-memory map must be recoverable from the database.
#[tokio::test]
async fn it_should_load_authentication_keys_from_the_database() {
let tracker = private_tracker();
let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap();
// Remove the key only from the in-memory map; it stays in the database.
tracker.keys.write().await.remove(&expiring_key.key());
let result = tracker.load_keys_from_database().await;
assert!(result.is_ok());
assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok());
}
// Keys that carry an expiration timestamp.
mod with_expiring_and {
mod randomly_generated_keys {
use std::time::Duration;
use torrust_tracker_clock::clock::Time;
use torrust_tracker_configuration::v2_0_0::core::PrivateMode;
use crate::core::auth::Key;
use crate::core::tests::the_tracker::private_tracker;
use crate::CurrentClock;
// The generated key must expire exactly `lifetime` from the current clock.
#[tokio::test]
async fn it_should_generate_the_key() {
let tracker = private_tracker();
let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap();
assert_eq!(
peer_key.valid_until,
Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap())
);
}
// A freshly generated, non-expired key must authenticate a peer.
#[tokio::test]
async fn it_should_authenticate_a_peer_with_the_key() {
let tracker = private_tracker();
let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap();
let result = tracker.authenticate(&peer_key.key()).await;
assert!(result.is_ok());
}
// With `check_keys_expiration: false`, even an expired key must be accepted.
#[tokio::test]
async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() {
let mut tracker = private_tracker();
tracker.config.private_mode = Some(PrivateMode {
check_keys_expiration: false,
});
// A zero lifetime presumably yields an already-expired key — confirm
// `add_auth_key` semantics for `Duration::ZERO`.
let past_timestamp = Duration::ZERO;
let peer_key = tracker
.add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp))
.await
.unwrap();
assert!(tracker.authenticate(&peer_key.key()).await.is_ok());
}
}
// Keys supplied by the caller (e.g. imported) rather than generated.
mod pre_generated_keys {
use std::time::Duration;
use torrust_tracker_clock::clock::Time;
use torrust_tracker_configuration::v2_0_0::core::PrivateMode;
use crate::core::auth::Key;
use crate::core::tests::the_tracker::private_tracker;
use crate::core::AddKeyRequest;
use crate::CurrentClock;
// Adding a pre-generated key with a lifetime must set its expiration.
#[tokio::test]
async fn it_should_add_a_pre_generated_key() {
let tracker = private_tracker();
let peer_key = tracker
.add_peer_key(AddKeyRequest {
opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()),
opt_seconds_valid: Some(100),
})
.await
.unwrap();
assert_eq!(
peer_key.valid_until,
Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap())
);
}
// A registered pre-generated key must authenticate a peer.
#[tokio::test]
async fn it_should_authenticate_a_peer_with_the_key() {
let tracker = private_tracker();
let peer_key = tracker
.add_peer_key(AddKeyRequest {
opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()),
opt_seconds_valid: Some(100),
})
.await
.unwrap();
let result = tracker.authenticate(&peer_key.key()).await;
assert!(result.is_ok());
}
// With expiration checking disabled, a key valid for 0 seconds still works.
#[tokio::test]
async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() {
let mut tracker = private_tracker();
tracker.config.private_mode = Some(PrivateMode {
check_keys_expiration: false,
});
let peer_key = tracker
.add_peer_key(AddKeyRequest {
opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()),
opt_seconds_valid: Some(0),
})
.await
.unwrap();
assert!(tracker.authenticate(&peer_key.key()).await.is_ok());
}
}
}
// Keys with no expiration (`valid_until` is `None`).
mod with_permanent_and {
mod randomly_generated_keys {
use crate::core::tests::the_tracker::private_tracker;
// A permanent key must be generated without an expiration timestamp.
#[tokio::test]
async fn it_should_generate_the_key() {
let tracker = private_tracker();
let peer_key = tracker.generate_permanent_auth_key().await.unwrap();
assert_eq!(peer_key.valid_until, None);
}
// A permanent key must authenticate a peer.
#[tokio::test]
async fn it_should_authenticate_a_peer_with_the_key() {
let tracker = private_tracker();
let peer_key = tracker.generate_permanent_auth_key().await.unwrap();
let result = tracker.authenticate(&peer_key.key()).await;
assert!(result.is_ok());
}
}
mod pre_generated_keys {
use crate::core::auth::Key;
use crate::core::tests::the_tracker::private_tracker;
use crate::core::AddKeyRequest;
// Omitting `opt_seconds_valid` must register the key as permanent.
#[tokio::test]
async fn it_should_add_a_pre_generated_key() {
let tracker = private_tracker();
let peer_key = tracker
.add_peer_key(AddKeyRequest {
opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()),
opt_seconds_valid: None,
})
.await
.unwrap();
assert_eq!(peer_key.valid_until, None);
}
// A permanent pre-generated key must authenticate a peer.
#[tokio::test]
async fn it_should_authenticate_a_peer_with_the_key() {
let tracker = private_tracker();
let peer_key = tracker
.add_peer_key(AddKeyRequest {
opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()),
opt_seconds_valid: None,
})
.await
.unwrap();
let result = tracker.authenticate(&peer_key.key()).await;
assert!(result.is_ok());
}
}
}
}
// TODO: announce/scrape behavior in private mode is not covered yet.
mod handling_an_announce_request {}
mod handling_an_scrape_request {}
}
// Tracker configured as both `private` and whitelisted.
// TODO: these scenarios are placeholders with no test coverage yet.
mod configured_as_private_and_whitelisted {
mod handling_an_announce_request {}
mod handling_an_scrape_request {}
}
mod handling_torrent_persistence {
use aquatic_udp_protocol::AnnounceEvent;
use torrust_tracker_torrent_repository::entry::EntrySync;
use torrust_tracker_torrent_repository::repository::Repository;
use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database};
#[tokio::test]
async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() {
let tracker = tracker_persisting_torrents_in_database();
let info_hash = sample_info_hash();
let mut peer = sample_peer();
peer.event = AnnounceEvent::Started;
let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer);
assert_eq!(swarm_stats.downloaded, 0);
peer.event = AnnounceEvent::Completed;
let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer);
assert_eq!(swarm_stats.downloaded, 1);
tracker.torrents.remove(&info_hash);
tracker.load_torrents_from_database().unwrap();
let torrent_entry = tracker.torrents.get(&info_hash).expect("it should be able to get entry");
assert_eq!(torrent_entry.get_swarm_metadata().downloaded, 1);
assert!(torrent_entry.peers_is_empty());
}
}
}
}