diff --git a/cSpell.json b/cSpell.json index 9f10d99e4..a451d18dc 100644 --- a/cSpell.json +++ b/cSpell.json @@ -73,6 +73,7 @@ "uroot", "Vagaa", "Vuze", + "whitespaces", "Xtorrent", "Xunlei", "xxxxxxxxxxxxxxxxxxxxd", diff --git a/src/http/axum_implementation/extractors.rs b/src/http/axum_implementation/extractors.rs new file mode 100644 index 000000000..a1f3fad1e --- /dev/null +++ b/src/http/axum_implementation/extractors.rs @@ -0,0 +1,159 @@ +use std::panic::Location; +use std::str::FromStr; + +use axum::async_trait; +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::http::StatusCode; +use thiserror::Error; + +use super::query::Query; +use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::tracker::peer::{self, IdConversionError}; + +pub struct ExtractAnnounceParams(pub AnnounceParams); + +#[derive(Debug, PartialEq)] +pub struct AnnounceParams { + pub info_hash: InfoHash, + pub peer_id: peer::Id, + pub port: u16, +} + +#[derive(Error, Debug)] +pub enum ParseAnnounceQueryError { + #[error("missing infohash {location}")] + MissingInfoHash { location: &'static Location<'static> }, + #[error("invalid infohash {location}")] + InvalidInfoHash { location: &'static Location<'static> }, + #[error("missing peer id {location}")] + MissingPeerId { location: &'static Location<'static> }, + #[error("invalid peer id {location}")] + InvalidPeerId { location: &'static Location<'static> }, + #[error("missing port {location}")] + MissingPort { location: &'static Location<'static> }, + #[error("invalid port {location}")] + InvalidPort { location: &'static Location<'static> }, +} + +impl From for ParseAnnounceQueryError { + #[track_caller] + fn from(_err: IdConversionError) -> Self { + Self::InvalidPeerId { + location: Location::caller(), + } + } +} + +impl From for ParseAnnounceQueryError { + #[track_caller] + fn from(_err: ConversionError) -> Self 
{ + Self::InvalidPeerId { + location: Location::caller(), + } + } +} + +impl TryFrom for AnnounceParams { + type Error = ParseAnnounceQueryError; + + fn try_from(query: Query) -> Result { + Ok(Self { + info_hash: extract_info_hash(&query)?, + peer_id: extract_peer_id(&query)?, + port: extract_port(&query)?, + }) + } +} + +fn extract_info_hash(query: &Query) -> Result { + match query.get_param("info_hash") { + Some(raw_info_hash) => Ok(percent_decode_info_hash(&raw_info_hash)?), + None => { + return Err(ParseAnnounceQueryError::MissingInfoHash { + location: Location::caller(), + }) + } + } +} + +fn extract_peer_id(query: &Query) -> Result { + match query.get_param("peer_id") { + Some(raw_peer_id) => Ok(percent_decode_peer_id(&raw_peer_id)?), + None => { + return Err(ParseAnnounceQueryError::MissingPeerId { + location: Location::caller(), + }) + } + } +} + +fn extract_port(query: &Query) -> Result { + match query.get_param("port") { + Some(raw_port) => Ok(u16::from_str(&raw_port).map_err(|_e| ParseAnnounceQueryError::InvalidPort { + location: Location::caller(), + })?), + None => { + return Err(ParseAnnounceQueryError::MissingPort { + location: Location::caller(), + }) + } + } +} + +#[async_trait] +impl FromRequestParts for ExtractAnnounceParams +where + S: Send + Sync, +{ + type Rejection = (StatusCode, &'static str); + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let raw_query = parts.uri.query(); + + if raw_query.is_none() { + return Err((StatusCode::BAD_REQUEST, "missing query params")); + } + + let query = raw_query.unwrap().parse::(); + + if query.is_err() { + return Err((StatusCode::BAD_REQUEST, "can't parse query params")); + } + + let announce_params = AnnounceParams::try_from(query.unwrap()); + + if announce_params.is_err() { + return Err((StatusCode::BAD_REQUEST, "can't parse query params for announce request")); + } + + Ok(ExtractAnnounceParams(announce_params.unwrap())) + } +} + +#[cfg(test)] +mod tests { + use 
super::AnnounceParams; + use crate::http::axum_implementation::query::Query; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn announce_request_params_should_be_extracted_from_url_query_params() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + + let query = raw_query.parse::().unwrap(); + + let announce_params = AnnounceParams::try_from(query).unwrap(); + + assert_eq!( + announce_params, + AnnounceParams { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + } + ); + } +} diff --git a/src/http/axum_implementation/handlers.rs b/src/http/axum_implementation/handlers.rs new file mode 100644 index 000000000..050fa8e69 --- /dev/null +++ b/src/http/axum_implementation/handlers.rs @@ -0,0 +1,25 @@ +use std::sync::Arc; + +use axum::extract::State; +use axum::response::Json; + +use super::extractors::ExtractAnnounceParams; +use super::resources::ok::Ok; +use super::responses::ok_response; +use crate::tracker::Tracker; + +#[allow(clippy::unused_async)] +pub async fn get_status_handler() -> Json { + ok_response() +} + +/// # Panics +/// +/// todo +#[allow(clippy::unused_async)] +pub async fn announce_handler( + State(_tracker): State>, + ExtractAnnounceParams(_announce_params): ExtractAnnounceParams, +) -> Json { + todo!() +} diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs new file mode 100644 index 000000000..9d96362df --- /dev/null +++ b/src/http/axum_implementation/mod.rs @@ -0,0 +1,7 @@ +pub mod extractors; +pub mod handlers; +pub mod query; +pub mod resources; +pub mod responses; +pub mod routes; +pub mod server; diff --git a/src/http/axum_implementation/query.rs b/src/http/axum_implementation/query.rs new file mode 100644 index 000000000..c7c20b22d --- /dev/null +++ b/src/http/axum_implementation/query.rs @@ -0,0 +1,138 @@ 
+use std::collections::HashMap; +use std::panic::Location; +use std::str::FromStr; + +use thiserror::Error; +pub struct Query { + params: HashMap, +} + +#[derive(Error, Debug)] +pub enum ParseQueryError { + #[error("invalid param {raw_param} in {location}")] + InvalidParam { + location: &'static Location<'static>, + raw_param: String, + }, +} + +impl FromStr for Query { + type Err = ParseQueryError; + + fn from_str(raw_query: &str) -> Result { + let mut params: HashMap = HashMap::new(); + + let raw_params = raw_query.trim().trim_start_matches('?').split('&').collect::>(); + + for raw_param in raw_params { + let param: Param = raw_param.parse()?; + params.insert(param.name, param.value); + } + + Ok(Self { params }) + } +} + +#[derive(Debug, PartialEq)] +struct Param { + name: String, + value: String, +} + +impl FromStr for Param { + type Err = ParseQueryError; + + fn from_str(raw_param: &str) -> Result { + let pair = raw_param.split('=').collect::>(); + + if pair.len() > 2 { + return Err(ParseQueryError::InvalidParam { + location: Location::caller(), + raw_param: raw_param.to_owned(), + }); + } + + Ok(Self { + name: pair[0].to_owned(), + value: pair[1].to_owned(), + }) + } +} + +impl Query { + #[must_use] + pub fn get_param(&self, name: &str) -> Option { + self.params.get(name).map(std::string::ToString::to_string) + } +} + +#[cfg(test)] +mod tests { + use super::Query; + use crate::http::axum_implementation::query::Param; + + #[test] + fn it_should_parse_the_query_params_from_an_url_query_string() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + + let query = raw_query.parse::().unwrap(); + + assert_eq!( + query.get_param("info_hash").unwrap(), + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + ); + assert_eq!(query.get_param("peer_id").unwrap(), "-qB00000000000000001"); + assert_eq!(query.get_param("port").unwrap(), "17548"); + } + + #[test] + fn 
it_should_fail_parsing_an_invalid_query_string() { + let invalid_raw_query = "name=value=value"; + + let query = invalid_raw_query.parse::(); + + assert!(query.is_err()); + } + + #[test] + fn it_should_ignore_the_preceding_question_mark_if_it_exists() { + let raw_query = "?name=value"; + + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name").unwrap(), "value"); + } + + #[test] + fn it_should_trim_whitespaces() { + let raw_query = " name=value "; + + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name").unwrap(), "value"); + } + + #[test] + fn it_should_parse_a_single_query_param() { + let raw_param = "name=value"; + + let param = raw_param.parse::().unwrap(); + + assert_eq!( + param, + Param { + name: "name".to_string(), + value: "value".to_string(), + } + ); + } + + #[test] + fn it_should_fail_parsing_an_invalid_query_param() { + let invalid_raw_param = "name=value=value"; + + let query = invalid_raw_param.parse::(); + + assert!(query.is_err()); + } +} diff --git a/src/http/axum_implementation/resources/mod.rs b/src/http/axum_implementation/resources/mod.rs new file mode 100644 index 000000000..a493c2ac2 --- /dev/null +++ b/src/http/axum_implementation/resources/mod.rs @@ -0,0 +1 @@ +pub mod ok; diff --git a/src/http/axum_implementation/resources/ok.rs b/src/http/axum_implementation/resources/ok.rs new file mode 100644 index 000000000..adc56e6ea --- /dev/null +++ b/src/http/axum_implementation/resources/ok.rs @@ -0,0 +1,4 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Ok {} diff --git a/src/http/axum_implementation/responses.rs b/src/http/axum_implementation/responses.rs new file mode 100644 index 000000000..9c5896b35 --- /dev/null +++ b/src/http/axum_implementation/responses.rs @@ -0,0 +1,10 @@ +// Resource responses + +use axum::Json; + +use super::resources::ok::Ok; + +#[must_use] +pub fn ok_response() -> Json { + Json(Ok {}) +} diff --git 
a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs new file mode 100644 index 000000000..8e4980682 --- /dev/null +++ b/src/http/axum_implementation/routes.rs @@ -0,0 +1,15 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::{announce_handler, get_status_handler}; +use crate::tracker::Tracker; + +pub fn router(tracker: &Arc) -> Router { + Router::new() + // Status + .route("/status", get(get_status_handler)) + // Announce request + .route("/announce", get(announce_handler).with_state(tracker.clone())) +} diff --git a/src/http/axum_implementation/server.rs b/src/http/axum_implementation/server.rs new file mode 100644 index 000000000..541dda33e --- /dev/null +++ b/src/http/axum_implementation/server.rs @@ -0,0 +1,43 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; +use futures::Future; +use log::info; +use warp::hyper; + +use super::routes::router; +use crate::tracker::Tracker; + +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { + let app = router(tracker); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + + server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust HTTP tracker server on http://{} ...", socket_addr); + }) +} + +pub fn start_tls( + socket_addr: SocketAddr, + ssl_config: RustlsConfig, + tracker: &Arc, +) -> impl Future> { + let app = router(tracker); + + let handle = Handle::new(); + let shutdown_handle = handle.clone(); + + tokio::spawn(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust HTTP tracker server on https://{} ...", socket_addr); + shutdown_handle.shutdown(); + }); + + axum_server::bind_rustls(socket_addr, ssl_config) + .handle(handle) + .serve(app.into_make_service()) +} diff --git 
a/src/http/mod.rs b/src/http/mod.rs index fa4c263b5..039a2067b 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -9,13 +9,15 @@ //! - //! - //! -pub mod error; -pub mod filters; -pub mod handlers; -pub mod request; -pub mod response; -pub mod routes; -pub mod server; -pub type Bytes = u64; -pub type WebResult = std::result::Result; +use serde::{Deserialize, Serialize}; + +pub mod axum_implementation; +pub mod percent_encoding; +pub mod warp_implementation; + +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +pub enum Version { + Warp, + Axum, +} diff --git a/src/http/percent_encoding.rs b/src/http/percent_encoding.rs new file mode 100644 index 000000000..9b5b79ed7 --- /dev/null +++ b/src/http/percent_encoding.rs @@ -0,0 +1,66 @@ +use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::tracker::peer::{self, IdConversionError}; + +/// # Errors +/// +/// Will return `Err` if if the decoded bytes do not represent a valid `InfoHash`. +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); + InfoHash::try_from(bytes) +} + +/// # Errors +/// +/// Will return `Err` if if the decoded bytes do not represent a valid `peer::Id`. 
+pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); + peer::Id::try_from(bytes) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn it_should_decode_a_percent_encoded_info_hash() { + let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; + + let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() + ); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_info_hash() { + let invalid_encoded_infohash = "invalid percent-encoded infohash"; + + let info_hash = percent_decode_info_hash(invalid_encoded_infohash); + + assert!(info_hash.is_err()); + } + + #[test] + fn it_should_decode_a_percent_encoded_peer_id() { + let encoded_peer_id = "%2DqB00000000000000000"; + + let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); + + assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_peer_id() { + let invalid_encoded_peer_id = "invalid percent-encoded peer id"; + + let peer_id = percent_decode_peer_id(invalid_encoded_peer_id); + + assert!(peer_id.is_err()); + } +} diff --git a/src/http/error.rs b/src/http/warp_implementation/error.rs similarity index 100% rename from src/http/error.rs rename to src/http/warp_implementation/error.rs diff --git a/src/http/filters.rs b/src/http/warp_implementation/filters.rs similarity index 90% rename from src/http/filters.rs rename to src/http/warp_implementation/filters.rs index 2760c995c..176170330 100644 --- a/src/http/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -8,6 +8,7 @@ use warp::{reject, Filter, Rejection}; use 
super::error::Error; use super::{request, WebResult}; +use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, auth, peer}; @@ -78,9 +79,11 @@ fn info_hashes(raw_query: &String) -> WebResult> { for v in split_raw_query { if v.contains("info_hash") { + // get raw percent encoded infohash let raw_info_hash = v.split('=').collect::>()[1]; - let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); + + let info_hash = percent_decode_info_hash(raw_info_hash); + if let Ok(ih) = info_hash { info_hashes.push(ih); } @@ -112,24 +115,17 @@ fn peer_id(raw_query: &String) -> WebResult { for v in split_raw_query { // look for the peer_id param if v.contains("peer_id") { - // get raw percent_encoded peer_id + // get raw percent encoded peer id let raw_peer_id = v.split('=').collect::>()[1]; - // decode peer_id - let peer_id_bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); - - // peer_id must be 20 bytes - if peer_id_bytes.len() != 20 { + if let Ok(id) = percent_decode_peer_id(raw_peer_id) { + peer_id = Some(id); + } else { return Err(reject::custom(Error::InvalidPeerId { location: Location::caller(), })); } - // clone peer_id_bytes into fixed length array - let mut byte_arr: [u8; 20] = Default::default(); - byte_arr.clone_from_slice(peer_id_bytes.as_slice()); - - peer_id = Some(peer::Id(byte_arr)); break; } } diff --git a/src/http/handlers.rs b/src/http/warp_implementation/handlers.rs similarity index 100% rename from src/http/handlers.rs rename to src/http/warp_implementation/handlers.rs diff --git a/src/http/warp_implementation/mod.rs b/src/http/warp_implementation/mod.rs new file mode 100644 index 000000000..4fbfb48fb --- /dev/null +++ b/src/http/warp_implementation/mod.rs @@ -0,0 +1,12 @@ +use 
warp::Rejection; + +pub mod error; +pub mod filters; +pub mod handlers; +pub mod request; +pub mod response; +pub mod routes; +pub mod server; + +pub type Bytes = u64; +pub type WebResult = std::result::Result; diff --git a/src/http/request.rs b/src/http/warp_implementation/request.rs similarity index 94% rename from src/http/request.rs rename to src/http/warp_implementation/request.rs index bc549b698..f666b48c5 100644 --- a/src/http/request.rs +++ b/src/http/warp_implementation/request.rs @@ -2,7 +2,7 @@ use std::net::IpAddr; use serde::Deserialize; -use crate::http::Bytes; +use crate::http::warp_implementation::Bytes; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/http/response.rs b/src/http/warp_implementation/response.rs similarity index 100% rename from src/http/response.rs rename to src/http/warp_implementation/response.rs diff --git a/src/http/routes.rs b/src/http/warp_implementation/routes.rs similarity index 100% rename from src/http/routes.rs rename to src/http/warp_implementation/routes.rs diff --git a/src/http/server.rs b/src/http/warp_implementation/server.rs similarity index 100% rename from src/http/server.rs rename to src/http/warp_implementation/server.rs diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 8e38039b7..aa96af884 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -1,21 +1,31 @@ use std::net::SocketAddr; use std::sync::Arc; +use axum_server::tls_rustls::RustlsConfig; use log::{info, warn}; use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::config::HttpTracker; -use crate::http::server::Http; +use crate::http::axum_implementation::server; +use crate::http::warp_implementation::server::Http; +use crate::http::Version; use crate::tracker; #[derive(Debug)] pub struct ServerJobStarted(); +pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { + match version { + Version::Warp => start_warp(config, 
tracker.clone()).await, + Version::Axum => start_axum(config, tracker.clone()).await, + } +} + /// # Panics /// -/// It would panic if the `config::HttpTracker` struct would contain an inappropriate values. -pub async fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { +/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. +async fn start_warp(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .bind_address .parse::() @@ -68,3 +78,57 @@ pub async fn start_job(config: &HttpTracker, tracker: Arc) -> join_handle } + +/// # Panics +/// +/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. +async fn start_axum(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); + + let (tx, rx) = oneshot::channel::(); + + // Run the API server + let join_handle = tokio::spawn(async move { + if !ssl_enabled { + info!("Starting Torrust HTTP tracker server on: http://{}", bind_addr); + + let handle = server::start(bind_addr, &tracker); + + tx.send(ServerJobStarted()) + .expect("the HTTP tracker server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust HTTP tracker server on http://{} stopped", bind_addr); + } + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust HTTP tracker server on: https://{}", bind_addr); + + let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) + .await + .unwrap(); + + let handle = server::start_tls(bind_addr, ssl_config, &tracker); + + tx.send(ServerJobStarted()) + .expect("the HTTP tracker server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust HTTP tracker 
server on https://{} stopped", bind_addr); + } + } + }); + + // Wait until the HTTP tracker server job is running + match rx.await { + Ok(_msg) => info!("Torrust HTTP tracker server started"), + Err(e) => panic!("the HTTP tracker server was dropped: {e}"), + } + + join_handle +} diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 83a595c1f..320636725 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -1,7 +1,24 @@ +use std::panic::Location; + +use thiserror::Error; + #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub struct InfoHash(pub [u8; 20]); +const INFO_HASH_BYTES_LEN: usize = 20; + impl InfoHash { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); + let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + /// For readability, when accessing the bytes array #[must_use] pub fn bytes(&self) -> [u8; 20] { @@ -57,6 +74,40 @@ impl std::convert::From<[u8; 20]> for InfoHash { } } +#[derive(Error, Debug)] +pub enum ConversionError { + #[error("not enough bytes for infohash: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + #[error("too many bytes for infohash: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl TryFrom> for InfoHash { + type Error = ConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < INFO_HASH_BYTES_LEN { + return Err(ConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + if bytes.len() > INFO_HASH_BYTES_LEN { + return Err(ConversionError::TooManyBytes { + location: Location::caller(), + message: format! 
{"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + impl serde::ser::Serialize for InfoHash { fn serialize(&self, serializer: S) -> Result { let mut buffer = [0u8; 40]; @@ -166,6 +217,26 @@ mod tests { ); } + #[test] + fn an_info_hash_can_be_created_from_a_byte_vector() { + let info_hash: InfoHash = [255u8; 20].to_vec().try_into().unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_less_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 19].to_vec()).is_err()); + } + + #[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_more_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 21].to_vec()).is_err()); + } + #[test] fn an_info_hash_can_be_serialized() { let s = ContainingInfoHash { diff --git a/src/setup.rs b/src/setup.rs index 31be3baac..3461667cc 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -4,6 +4,7 @@ use log::warn; use tokio::task::JoinHandle; use crate::config::Configuration; +use crate::http::Version; use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::tracker; @@ -47,7 +48,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone()).await); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::Warp).await); } // Start HTTP API diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 3f639f970..04e4cdb45 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,10 +1,12 @@ use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::Serialize; +use thiserror::Error; -use crate::http::request::Announce; +use 
crate::http::warp_implementation::request::Announce; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; @@ -91,6 +93,69 @@ impl Peer { #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] pub struct Id(pub [u8; 20]); +const PEER_ID_BYTES_LEN: usize = 20; + +#[derive(Error, Debug)] +pub enum IdConversionError { + #[error("not enough bytes for peer id: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + #[error("too many bytes for peer id: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl Id { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), PEER_ID_BYTES_LEN); + let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } +} + +impl From<[u8; 20]> for Id { + fn from(bytes: [u8; 20]) -> Self { + Id(bytes) + } +} + +impl TryFrom> for Id { + type Error = IdConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < PEER_ID_BYTES_LEN { + return Err(IdConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + if bytes.len() > PEER_ID_BYTES_LEN { + return Err(IdConversionError::TooManyBytes { + location: Location::caller(), + message: format! 
{"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl std::str::FromStr for Id { + type Err = IdConversionError; + + fn from_str(s: &str) -> Result { + Self::try_from(s.as_bytes().to_vec()) + } +} + impl std::fmt::Display for Id { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.to_hex_string() { @@ -239,6 +304,86 @@ mod test { mod torrent_peer_id { use crate::tracker::peer; + #[test] + fn should_be_instantiated_from_a_byte_slice() { + let id = peer::Id::from_bytes(&[ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_instantiate_from_a_byte_slice_with_less_than_20_bytes() { + let less_than_20_bytes = [0; 19]; + let _ = peer::Id::from_bytes(&less_than_20_bytes); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_instantiate_from_a_byte_slice_with_more_than_20_bytes() { + let more_than_20_bytes = [0; 21]; + let _ = peer::Id::from_bytes(&more_than_20_bytes); + } + + #[test] + fn should_be_instantiated_from_a_string() { + let id = "-qB00000000000000001".parse::().unwrap(); + + let expected_id = peer::Id([ + 45, 113, 66, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 49, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + fn should_be_converted_from_a_20_byte_array() { + let id = peer::Id::from([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + fn should_be_converted_from_a_byte_vector() { + let id = peer::Id::try_from( + [ + 0, 159, 146, 150, 0, 
159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ] + .to_vec(), + ) + .unwrap(); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_convert_from_a_byte_vector_with_less_than_20_bytes() { + let _ = peer::Id::try_from([0; 19].to_vec()).unwrap(); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_convert_from_a_byte_vector_with_more_than_20_bytes() { + let _ = peer::Id::try_from([0; 21].to_vec()).unwrap(); + } + #[test] fn should_be_converted_to_hex_string() { let id = peer::Id(*b"-qB00000000000000000"); @@ -454,7 +599,7 @@ mod test { mod torrent_peer_constructor_from_for_http_requests { use std::net::{IpAddr, Ipv4Addr}; - use crate::http::request::Announce; + use crate::http::warp_implementation::request::Announce; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::{self, Peer}; diff --git a/tests/http/bencode.rs b/tests/http/bencode.rs deleted file mode 100644 index d107089cf..000000000 --- a/tests/http/bencode.rs +++ /dev/null @@ -1,15 +0,0 @@ -pub type ByteArray20 = [u8; 20]; - -pub struct InfoHash(ByteArray20); - -impl InfoHash { - pub fn new(vec: &[u8]) -> Self { - let mut byte_array_20: ByteArray20 = Default::default(); - byte_array_20.clone_from_slice(vec); - Self(byte_array_20) - } - - pub fn bytes(&self) -> ByteArray20 { - self.0 - } -} diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 87087026f..8c1e3c995 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,7 +1,28 @@ pub mod asserts; -pub mod bencode; pub mod client; pub mod connection_info; pub mod requests; pub mod responses; pub mod server; + +use percent_encoding::NON_ALPHANUMERIC; + +pub type ByteArray20 = [u8; 20]; + +pub fn percent_encode_byte_array(bytes: &ByteArray20) -> String { + percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() +} + +pub 
struct InfoHash(ByteArray20); + +impl InfoHash { + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/tests/http/requests/announce.rs b/tests/http/requests/announce.rs index a8ebc95f8..87aa3425f 100644 --- a/tests/http/requests/announce.rs +++ b/tests/http/requests/announce.rs @@ -2,12 +2,11 @@ use std::fmt; use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; -use percent_encoding::NON_ALPHANUMERIC; use serde_repr::Serialize_repr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; -use crate::http::bencode::ByteArray20; +use crate::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: ByteArray20, @@ -211,11 +210,11 @@ impl QueryParams { let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); Self { - info_hash: Some(percent_encoding::percent_encode(&announce_query.info_hash, NON_ALPHANUMERIC).to_string()), + info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), peer_addr: Some(announce_query.peer_addr.to_string()), downloaded: Some(announce_query.downloaded.to_string()), uploaded: Some(announce_query.uploaded.to_string()), - peer_id: Some(percent_encoding::percent_encode(&announce_query.peer_id, NON_ALPHANUMERIC).to_string()), + peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), port: Some(announce_query.port.to_string()), left: Some(announce_query.left.to_string()), event, diff --git a/tests/http/requests/scrape.rs b/tests/http/requests/scrape.rs index 6ab46974b..979dad540 100644 --- a/tests/http/requests/scrape.rs +++ b/tests/http/requests/scrape.rs @@ -1,10 +1,9 @@ use std::fmt; use std::str::FromStr; -use percent_encoding::NON_ALPHANUMERIC; use torrust_tracker::protocol::info_hash::InfoHash; -use crate::http::bencode::ByteArray20; +use 
crate::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: Vec, @@ -111,7 +110,7 @@ impl QueryParams { let info_hashes = scrape_query .info_hash .iter() - .map(|info_hash_bytes| percent_encoding::percent_encode(info_hash_bytes, NON_ALPHANUMERIC).to_string()) + .map(percent_encode_byte_array) .collect::>(); Self { info_hash: info_hashes } diff --git a/tests/http/responses/scrape.rs b/tests/http/responses/scrape.rs index 5bf938ebe..1aea517cf 100644 --- a/tests/http/responses/scrape.rs +++ b/tests/http/responses/scrape.rs @@ -4,7 +4,7 @@ use std::str; use serde::{self, Deserialize, Serialize}; use serde_bencode::value::Value; -use crate::http::bencode::{ByteArray20, InfoHash}; +use crate::http::{ByteArray20, InfoHash}; #[derive(Debug, PartialEq, Default)] pub struct Response { diff --git a/tests/http/server.rs b/tests/http/server.rs index e48ecd88d..e5266eee5 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -3,6 +3,7 @@ use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; +use torrust_tracker::http::Version; use torrust_tracker::jobs::http_tracker; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::mode::Mode; @@ -13,24 +14,24 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" in settings -pub async fn start_public_http_tracker() -> Server { +pub async fn start_public_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Public; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "listed" in settings -pub async fn start_whitelisted_http_tracker() -> Server { +pub async fn start_whitelisted_http_tracker(version: Version) -> 
Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Listed; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "private" in settings -pub async fn start_private_http_tracker() -> Server { +pub async fn start_private_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Private; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with a wildcard IPV6 address. @@ -40,7 +41,7 @@ pub async fn start_private_http_tracker() -> Server { /// [[http_trackers]] /// bind_address = "[::]:7070" /// ``` -pub async fn start_ipv6_http_tracker() -> Server { +pub async fn start_ipv6_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); // Change socket address to "wildcard address" (unspecified address which means any IP address) @@ -49,7 +50,7 @@ pub async fn start_ipv6_http_tracker() -> Server { let new_ipv6_socket_address = format!("[::]:{}", socket_addr.port()); configuration.http_trackers[0].bind_address = new_ipv6_socket_address; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with an specific `external_ip`. 
@@ -58,10 +59,10 @@ pub async fn start_ipv6_http_tracker() -> Server { /// ```text /// external_ip = "2.137.87.41" /// ``` -pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr) -> Server { +pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.external_ip = Some(external_ip.to_string()); - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker `on_reverse_proxy`. @@ -70,24 +71,24 @@ pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr) -> Server /// ```text /// on_reverse_proxy = true /// ``` -pub async fn start_http_tracker_on_reverse_proxy() -> Server { +pub async fn start_http_tracker_on_reverse_proxy(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.on_reverse_proxy = true; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } -pub async fn start_default_http_tracker() -> Server { +pub async fn start_default_http_tracker(version: Version) -> Server { let configuration = tracker_configuration(); - start_custom_http_tracker(configuration.clone()).await + start_custom_http_tracker(configuration.clone(), version).await } pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) } -pub async fn start_custom_http_tracker(configuration: Arc) -> Server { +pub async fn start_custom_http_tracker(configuration: Arc, version: Version) -> Server { let server = start(&configuration); - http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone()).await; + http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone(), version).await; server } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 44bb8609d..60219d9fe 100644 --- a/tests/http_tracker.rs 
+++ b/tests/http_tracker.rs @@ -1,10 +1,18 @@ /// Integration tests for HTTP tracker server /// -/// cargo test `http_tracker_server` -- --nocapture +/// Warp version: +/// ```text +/// cargo test `warp_http_tracker_server` -- --nocapture +/// ``` +/// +/// Axum version (WIP): +/// ```text +/// cargo test `axum_http_tracker_server` -- --nocapture +/// ``` mod common; mod http; -mod http_tracker_server { +mod warp_http_tracker_server { @@ -26,6 +34,7 @@ mod http_tracker_server { use local_ip_address::local_ip; use reqwest::Response; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -46,7 +55,7 @@ mod http_tracker_server { #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -61,7 +70,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; @@ -70,7 +79,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; // Without `info_hash` param @@ -111,7 +120,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -133,7 +142,7 @@ mod 
http_tracker_server { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -148,7 +157,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -167,7 +176,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -186,7 +195,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -212,7 +221,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -231,7 +240,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = 
QueryBuilder::default().query().params(); @@ -252,7 +261,7 @@ mod http_tracker_server { async fn should_not_fail_when_the_event_param_is_invalid() { // All invalid values are ignored as if the `event` param were empty - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -279,7 +288,7 @@ mod http_tracker_server { #[tokio::test] async fn should_not_fail_when_the_compact_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -298,7 +307,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()) .announce( @@ -323,7 +332,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -361,7 +370,7 @@ mod http_tracker_server { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); @@ -388,7 +397,7 @@ mod http_tracker_server { // Tracker Returns Compact Peer Lists // 
https://www.bittorrent.org/beps/bep_0023.html - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -427,7 +436,7 @@ mod http_tracker_server { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -463,7 +472,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce(&QueryBuilder::default().query()) @@ -476,7 +485,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker().await; + let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -491,7 +500,7 @@ mod http_tracker_server { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce( @@ -508,7 +517,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce(&QueryBuilder::default().query()) @@ -521,7 +530,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker().await; + let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -536,7 +545,7 @@ mod http_tracker_server { async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce( @@ -553,7 +562,7 @@ mod http_tracker_server { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); @@ -583,7 +592,8 @@ mod http_tracker_server { 127.0.0.1 external_ip = "2.137.87.41" */ - let http_tracker_server = start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap()).await; + let http_tracker_server = + start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -614,9 +624,11 @@ mod http_tracker_server { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let http_tracker_server = - start_http_tracker_with_external_ip(&IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()) - .await; + let http_tracker_server = start_http_tracker_with_external_ip( + &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + Version::Warp, + ) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -647,7 +659,7 @@ mod http_tracker_server { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let http_tracker_server = start_http_tracker_on_reverse_proxy().await; + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; let 
info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -686,6 +698,7 @@ mod http_tracker_server { use std::net::IpAddr; use std::str::FromStr; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -699,7 +712,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; assert_internal_server_error_response(response).await; @@ -707,7 +720,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -725,7 +738,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -763,7 +776,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -801,7 +814,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let http_tracker = start_public_http_tracker().await; + let 
http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -818,7 +831,7 @@ mod http_tracker_server { #[tokio::test] async fn should_accept_multiple_infohashes() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); @@ -842,7 +855,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -861,7 +874,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let http_tracker = start_ipv6_http_tracker().await; + let http_tracker = start_ipv6_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -885,6 +898,7 @@ mod http_tracker_server { mod and_receiving_an_announce_request { use std::str::FromStr; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; @@ -894,7 +908,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let http_tracker_server = start_whitelisted_http_tracker().await; + let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -908,7 +922,7 @@ mod http_tracker_server { 
#[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let http_tracker_server = start_whitelisted_http_tracker().await; + let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -929,6 +943,7 @@ mod http_tracker_server { mod receiving_an_scrape_request { use std::str::FromStr; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -941,7 +956,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let http_tracker = start_whitelisted_http_tracker().await; + let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -970,7 +985,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let http_tracker = start_whitelisted_http_tracker().await; + let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1020,6 +1035,7 @@ mod http_tracker_server { use std::str::FromStr; use std::time::Duration; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; @@ -1033,7 +1049,7 @@ mod http_tracker_server { #[tokio::test] async fn should_respond_to_authenticated_peers() { - let http_tracker_server = start_private_http_tracker().await; + let http_tracker_server = start_private_http_tracker(Version::Warp).await; let key = http_tracker_server .tracker @@ -1050,7 +1066,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let http_tracker_server = 
start_private_http_tracker().await; + let http_tracker_server = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1063,7 +1079,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_authentication_key_is_not_valid() { - let http_tracker_server = start_private_http_tracker().await; + let http_tracker_server = start_private_http_tracker(Version::Warp).await; // The tracker does not have this key let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -1081,6 +1097,7 @@ mod http_tracker_server { use std::str::FromStr; use std::time::Duration; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; use torrust_tracker::tracker::peer; @@ -1094,7 +1111,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let http_tracker = start_private_http_tracker().await; + let http_tracker = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1123,7 +1140,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let http_tracker = start_private_http_tracker().await; + let http_tracker = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1165,7 +1182,7 @@ mod http_tracker_server { async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { // There is not authentication error - let http_tracker = start_private_http_tracker().await; + let http_tracker = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ 
-1204,59 +1221,1273 @@ mod http_tracker_server { } } -mod percent_encoding { - // todo: these operations are used in the HTTP tracker but they have not been extracted into independent functions. - // These tests document the operations. This behavior could be move to some functions int he future if they are extracted. +mod axum_http_tracker_server { - use std::str::FromStr; + // WIP: migration HTTP from Warp to Axum - use percent_encoding::NON_ALPHANUMERIC; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; + use torrust_tracker::http::Version; - #[test] - fn how_to_encode_an_info_hash() { - let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; - let encoded_info_hash = percent_encoding::percent_encode(&info_hash.0, NON_ALPHANUMERIC).to_string(); + #[tokio::test] + async fn should_return_the_status() { + // This is a temporary test to test the new Axum HTTP tracker server scaffolding + let http_tracker_server = start_default_http_tracker(Version::Axum).await; - assert_eq!(encoded_info_hash, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"); + let response = Client::new(http_tracker_server.get_connection_info()).get("status").await; + + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), "{}"); } - #[test] - fn how_to_decode_an_info_hash() { - let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; + mod for_all_config_modes { - let info_hash_bytes = percent_encoding::percent_decode_str(encoded_infohash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)).unwrap(); + mod receiving_an_announce_request { - assert_eq!( - info_hash, - InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() - ); - } + // Announce request documentation: + // + // BEP 03. 
The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce - #[test] - fn how_to_encode_a_peer_id() { - let peer_id = peer::Id(*b"-qB00000000000000000"); + use std::net::{IpAddr, Ipv6Addr}; + use std::str::FromStr; - let encoded_peer_id = percent_encoding::percent_encode(&peer_id.0, NON_ALPHANUMERIC).to_string(); + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; - assert_eq!(encoded_peer_id, "%2DqB00000000000000000"); - } + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{ + assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, + assert_internal_server_error_response, assert_invalid_info_hash_error_response, + assert_invalid_peer_id_error_response, assert_is_announce_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::{Compact, QueryBuilder}; + use crate::http::responses; + use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::http::server::{ + start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, + start_ipv6_http_tracker, start_public_http_tracker, + }; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_respond_if_only_the_mandatory_fields_are_provided() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + params.remove_optional_params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + 
assert_is_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; + + assert_internal_server_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_a_mandatory_field_is_missing() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + // Without `info_hash` param + + let mut params = QueryBuilder::default().query().params(); + + params.info_hash = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + + // Without `peer_id` param + + let mut params = QueryBuilder::default().query().params(); + + params.peer_id = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + + // Without `port` param + + let mut params = QueryBuilder::default().query().params(); + + params.port = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + } + 
} + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. + + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn 
should_fail_when_the_peer_id_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 21 bytes + ]; + + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_port_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("port", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_left_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("left", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_fail_when_the_event_param_is_invalid() { + // All invalid values are ignored as if the `event` param were empty + + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = 
QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase + "Stopped", // It should be lowercase + "Completed", // It should be lowercase + ]; + + for invalid_value in invalid_values { + params.set("event", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; - #[test] - fn how_to_decode_a_peer_id() { - let encoded_peer_id = "%2DqB00000000000000000"; + assert_is_announce_response(response).await; + } + } - let bytes_vec = percent_encoding::percent_decode_str(encoded_peer_id).collect::>(); + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_fail_when_the_compact_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; - // Clone peer_id_bytes into fixed length array - let mut peer_id_bytes: [u8; 20] = Default::default(); - peer_id_bytes.clone_from_slice(bytes_vec.as_slice()); + let mut params = QueryBuilder::default().query().params(); - let peer_id = peer::Id(peer_id_bytes); + let invalid_values = ["-1", "1.1", "a"]; - assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); + for invalid_value in invalid_values { + params.set("compact", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .query(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this 
test + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![], + }, + ) + .await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_list_of_previously_announced_peers() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2. This new peer is non included on the response peer list + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .query(), + ) + .await; + + // It should only contain teh previously announced peer + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(previously_announced_peer)], + }, + ) + .await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().build(); + + // Add a peer + http_tracker_server.add_torrent(&info_hash, &peer).await; + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .query(); + + 
assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&announce_query) + .await; + + assert_empty_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 accepting compact responses + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; + + let expected_response = responses::announce::Compact { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; + + assert_compact_announce_response(response, &expected_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. 
+ + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; + + assert!(!is_a_compact_announce_response(response).await); + } + + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + + Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + 
assert_eq!(stats.tcp6_connections_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 0); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + + Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
+ + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 0); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ + + let http_tracker_server = + start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ + + let http_tracker_server = start_http_tracker_with_external_ip( + &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + Version::Axum, + ) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let client = Client::new(http_tracker_server.get_connection_info()); + + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); + + // todo: shouldn't be the the leftmost IP address? + // THe application is taken the the rightmost IP address. 
See function http::filters::peer_addr + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + } + } + + mod receiving_an_scrape_request { + + // Scrape documentation: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape + + use std::net::IpAddr; + use std::str::FromStr; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::requests::scrape::QueryBuilder; + use crate::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + + assert_internal_server_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + 
params.set_one_info_hash_param(invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + // code-review: it's not returning the invalid info hash error + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = 
ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_accept_multiple_infohashes() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::new(http_tracker.get_connection_info()) + .scrape( + 
&requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let http_tracker = start_ipv6_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::bind(http_tracker.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } + } + } + + mod configured_as_whitelisted { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_whitelisted_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_torrent_not_in_whitelist_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_allow_announcing_a_whitelisted_torrent() { + let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker_server + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_is_announce_response(response).await; + } + } + + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_whitelisted_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + http_tracker + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } + } + + mod configured_as_private { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + + use crate::http::asserts::{ + assert_invalid_authentication_key_error_response, assert_is_announce_response, + assert_peer_not_authenticated_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_private_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_respond_to_authenticated_peers() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let key = http_tracker_server + .tracker + .generate_auth_key(Duration::from_secs(60)) + .await + .unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) + .announce(&QueryBuilder::default().query()) + .await; + + assert_is_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn 
should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_peer_not_authenticated_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_if_the_peer_authentication_key_is_not_valid() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + // The tracker does not have this key + let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) + .announce(&QueryBuilder::default().query()) + .await; + + assert_invalid_authentication_key_error_response(response).await; + } + } + + mod receiving_an_scrape_request { + + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_private_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let http_tracker = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + 
.with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let http_tracker = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let key = http_tracker.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), key.id()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + + let http_tracker = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + 
.await; + + let false_key_id: KeyId = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } + } + + mod configured_as_private_and_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} } }